Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
335 files changed, 65880 insertions, 37919 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 9375e7f12420..74a8105fd2c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -34,15 +34,6 @@ config DRM_AMDGPU_USERPTR This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it isn't already selected to enabled full userptr support. -config DRM_AMDGPU_GART_DEBUGFS - bool "Allow GART access through debugfs" - depends on DRM_AMDGPU - depends on DEBUG_FS - default n - help - Selecting this option creates a debugfs file to inspect the mapped - pages. Uses more memory for housekeeping, enable only for debugging. - source "drivers/gpu/drm/amd/acp/Kconfig" source "drivers/gpu/drm/amd/display/Kconfig" source "drivers/gpu/drm/amd/amdkfd/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index ca0e435559d5..653726588956 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME) ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/amdgpu \ - -I$(FULL_AMD_PATH)/powerplay/inc \ + -I$(FULL_AMD_PATH)/pm/inc \ -I$(FULL_AMD_PATH)/acp/include \ -I$(FULL_AMD_DISPLAY_PATH) \ -I$(FULL_AMD_DISPLAY_PATH)/include \ @@ -47,28 +47,34 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \ amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \ - amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ + atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ - amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ + amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \ + amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \ + amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \ + amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ - amdgpu_umc.o smu_v11_0_i2c.o + amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ + amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \ + amdgpu_eeprom.o amdgpu_mca.o + +amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o # add asic specific block -amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o -amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o +amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \ + uvd_v3_1.o amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ - vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ - arct_reg_init.o navi12_reg_init.o mxgpu_nv.o + vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \ + nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o # add DF block amdgpu-y += \ @@ -80,11 +86,12 @@ amdgpu-y += \ 
gmc_v7_0.o \ gmc_v8_0.o \ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \ - gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o + gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \ + mmhub_v1_7.o # add UMC block amdgpu-y += \ - umc_v6_1.o umc_v6_0.o + umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o # add IH block amdgpu-y += \ @@ -94,6 +101,7 @@ amdgpu-y += \ tonga_ih.o \ cz_ih.o \ vega10_ih.o \ + vega20_ih.o \ navi10_ih.o # add PSP block @@ -102,17 +110,15 @@ amdgpu-y += \ psp_v3_1.o \ psp_v10_0.o \ psp_v11_0.o \ - psp_v12_0.o - -# add SMC block -amdgpu-y += \ - amdgpu_dpm.o + psp_v11_0_8.o \ + psp_v12_0.o \ + psp_v13_0.o # add DCE block amdgpu-y += \ dce_v10_0.o \ dce_v11_0.o \ - dce_virtual.o + amdgpu_vkms.o # add GFX block amdgpu-y += \ @@ -120,6 +126,8 @@ amdgpu-y += \ amdgpu_rlc.o \ gfx_v8_0.o \ gfx_v9_0.o \ + gfx_v9_4.o \ + gfx_v9_4_2.o \ gfx_v10_0.o # add async DMA block @@ -128,7 +136,9 @@ amdgpu-y += \ sdma_v2_4.o \ sdma_v3_0.o \ sdma_v4_0.o \ - sdma_v5_0.o + sdma_v4_4.o \ + sdma_v5_0.o \ + sdma_v5_2.o # add MES block amdgpu-y += \ @@ -147,32 +157,57 @@ amdgpu-y += \ vce_v3_0.o \ vce_v4_0.o -# add VCN block +# add VCN and JPEG block amdgpu-y += \ amdgpu_vcn.o \ vcn_v1_0.o \ vcn_v2_0.o \ - vcn_v2_5.o + vcn_v2_5.o \ + vcn_v3_0.o \ + amdgpu_jpeg.o \ + jpeg_v1_0.o \ + jpeg_v2_0.o \ + jpeg_v2_5.o \ + jpeg_v3_0.o # add ATHUB block amdgpu-y += \ athub_v1_0.o \ - athub_v2_0.o + athub_v2_0.o \ + athub_v2_1.o + +# add SMUIO block +amdgpu-y += \ + smuio_v9_0.o \ + smuio_v11_0.o \ + smuio_v11_0_6.o \ + smuio_v13_0.o + +# add reset block +amdgpu-y += \ + amdgpu_reset.o + +# add MCA block +amdgpu-y += \ + mca_v3_0.o # add amdkfd interfaces amdgpu-y += amdgpu_amdkfd.o + ifneq ($(CONFIG_HSA_AMD),) AMDKFD_PATH := ../amdkfd include $(FULL_AMD_PATH)/amdkfd/Makefile amdgpu-y += $(AMDKFD_FILES) amdgpu-y += \ - amdgpu_amdkfd_fence.o \ - amdgpu_amdkfd_gpuvm.o \ - amdgpu_amdkfd_gfx_v8.o \ - amdgpu_amdkfd_gfx_v9.o \ - amdgpu_amdkfd_arcturus.o \ - amdgpu_amdkfd_gfx_v10.o + amdgpu_amdkfd_fence.o \ + amdgpu_amdkfd_gpuvm.o \ + amdgpu_amdkfd_gfx_v8.o \ + amdgpu_amdkfd_gfx_v9.o \ + amdgpu_amdkfd_arcturus.o \ + amdgpu_amdkfd_aldebaran.o \ + amdgpu_amdkfd_gfx_v10.o \ + amdgpu_amdkfd_gfx_v10_3.o ifneq ($(CONFIG_DRM_AMDGPU_CIK),) amdgpu-y += amdgpu_amdkfd_gfx_v7.o @@ -201,7 +236,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o -include $(FULL_AMD_PATH)/powerplay/Makefile +include $(FULL_AMD_PATH)/pm/Makefile amdgpu-y += $(AMD_POWERPLAY_FILES) diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c new file mode 100644 index 000000000000..bcfdb63b1d42 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -0,0 +1,409 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "aldebaran.h" +#include "amdgpu_reset.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_dpm.h" +#include "amdgpu_job.h" +#include "amdgpu_ring.h" +#include "amdgpu_ras.h" +#include "amdgpu_psp.h" +#include "amdgpu_xgmi.h" + +static struct amdgpu_reset_handler * +aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_reset_handler *handler; + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (reset_context->method != AMD_RESET_METHOD_NONE) { + dev_dbg(adev->dev, "Getting reset handler for method %d\n", + reset_context->method); + list_for_each_entry(handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == reset_context->method) + return handler; + } + } + + if (adev->gmc.xgmi.connected_to_cpu) { + list_for_each_entry(handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == AMD_RESET_METHOD_MODE2) { + reset_context->method = AMD_RESET_METHOD_MODE2; + return handler; + } + } + } + + dev_dbg(adev->dev, "Reset handler not found!\n"); + + return NULL; +} + +static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) +{ + int r, i; + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA)) + continue; + + r = adev->ip_blocks[i].version->funcs->suspend(adev); + + if (r) { + dev_err(adev->dev, + "suspend of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + + adev->ip_blocks[i].status.hw = false; + } + + return r; +} + +static int +aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + int r = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + dev_dbg(adev->dev, "Aldebaran prepare hw context\n"); + /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ + if (!amdgpu_sriov_vf(adev)) + r = aldebaran_mode2_suspend_ip(adev); + + return r; +} + +static void aldebaran_async_reset(struct work_struct *work) +{ + struct amdgpu_reset_handler *handler; + struct amdgpu_reset_control *reset_ctl = + container_of(work, struct amdgpu_reset_control, reset_work); + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + list_for_each_entry(handler, &reset_ctl->reset_handlers, + handler_list) { + if (handler->reset_method == reset_ctl->active_reset) { + dev_dbg(adev->dev, "Resetting device\n"); + handler->do_reset(adev); + break; + } + } +} + +static int aldebaran_mode2_reset(struct amdgpu_device *adev) +{ + /* disable BM */ + pci_clear_master(adev->pdev); + adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev); + return adev->asic_reset_res; +} + +static int +aldebaran_mode2_perform_reset(struct amdgpu_reset_control 
*reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_device *tmp_adev = NULL; + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + int r = 0; + + dev_dbg(adev->dev, "aldebaran perform hw reset\n"); + if (reset_context->hive == NULL) { + /* Wrong context, return error */ + return -EINVAL; + } + + list_for_each_entry(tmp_adev, &reset_context->hive->device_list, + gmc.xgmi.head) { + mutex_lock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2; + } + /* + * Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch + * them together so that they can be completed asynchronously on multiple nodes + */ + list_for_each_entry(tmp_adev, &reset_context->hive->device_list, + gmc.xgmi.head) { + /* For XGMI run all resets in parallel to speed up the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_unbound_wq, + &tmp_adev->reset_cntl->reset_work)) + r = -EALREADY; + } else + r = aldebaran_mode2_reset(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); + break; + } + } + + /* For XGMI wait for all resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, + &reset_context->hive->device_list, + gmc.xgmi.head) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->reset_cntl->reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + + list_for_each_entry(tmp_adev, &reset_context->hive->device_list, + gmc.xgmi.head) { + mutex_unlock(&tmp_adev->reset_cntl->reset_lock); + tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE; + } + + return r; +} + +static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) +{ + struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM]; + struct amdgpu_firmware_info *ucode; + struct amdgpu_ip_block *cmn_block; + int ucode_count = 0; + int i, r; + + dev_dbg(adev->dev, "Reloading ucodes after reset\n"); + for (i = 0; i < adev->firmware.max_ucodes; i++) { + ucode = &adev->firmware.ucode[i]; + if (!ucode->fw) + continue; + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_SDMA0: + case AMDGPU_UCODE_ID_SDMA1: + case AMDGPU_UCODE_ID_SDMA2: + case AMDGPU_UCODE_ID_SDMA3: + case AMDGPU_UCODE_ID_SDMA4: + case AMDGPU_UCODE_ID_SDMA5: + case AMDGPU_UCODE_ID_SDMA6: + case AMDGPU_UCODE_ID_SDMA7: + case AMDGPU_UCODE_ID_CP_MEC1: + case AMDGPU_UCODE_ID_CP_MEC1_JT: + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + case AMDGPU_UCODE_ID_RLC_G: + ucode_list[ucode_count++] = ucode; + break; + default: + break; + } + } + + /* Reinit NBIF block */ + cmn_block = + amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON); + if (unlikely(!cmn_block)) { + dev_err(adev->dev, "Failed to get BIF handle\n"); + return -EINVAL; + } + r = cmn_block->version->funcs->resume(adev); + if (r) + return r; + + /* Reinit GFXHUB */ + adev->gfxhub.funcs->init(adev); + r = adev->gfxhub.funcs->gart_enable(adev); + if (r) { + dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n"); + return r; + } + + /* Reload GFX firmware */ + r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count); + if (r) { + dev_err(adev->dev, "GFX ucode load failed after reset\n"); + return r; + } + + /* Resume RLC, FW needs RLC alive to complete reset process */ + adev->gfx.rlc.funcs->resume(adev); + + /* Wait for FW reset event 
complete */ + r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0); + if (r) { + dev_err(adev->dev, + "Failed to get response from firmware after reset\n"); + return r; + } + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA)) + continue; + r = adev->ip_blocks[i].version->funcs->resume(adev); + if (r) { + dev_err(adev->dev, + "resume of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + + adev->ip_blocks[i].status.hw = true; + } + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!(adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_SDMA || + adev->ip_blocks[i].version->type == + AMD_IP_BLOCK_TYPE_COMMON)) + continue; + + if (adev->ip_blocks[i].version->funcs->late_init) { + r = adev->ip_blocks[i].version->funcs->late_init( + (void *)adev); + if (r) { + dev_err(adev->dev, + "late_init of IP block <%s> failed %d after reset\n", + adev->ip_blocks[i].version->funcs->name, + r); + return r; + } + } + adev->ip_blocks[i].status.late_initialized = true; + } + + amdgpu_ras_set_error_query_ready(adev, true); + + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); + + return r; +} + +static int +aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *reset_context) +{ + int r; + struct amdgpu_device *tmp_adev = NULL; + + if (reset_context->hive == NULL) { + /* Wrong context, return error */ + return -EINVAL; + } + + list_for_each_entry(tmp_adev, &reset_context->hive->device_list, + gmc.xgmi.head) { + dev_info(tmp_adev->dev, + "GPU reset succeeded, trying to resume\n"); + r = aldebaran_mode2_restore_ip(tmp_adev); + if (r) + goto end; + + /* + * Add this ASIC as tracked as reset was already + * complete successfully. 
+ */ + amdgpu_register_gpu_instance(tmp_adev); + + /* Resume RAS */ + amdgpu_ras_resume(tmp_adev); + + /* Update PSP FW topology after reset */ + if (reset_context->hive && + tmp_adev->gmc.xgmi.num_physical_nodes > 1) + r = amdgpu_xgmi_update_topology(reset_context->hive, + tmp_adev); + + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(tmp_adev); + + r = amdgpu_ib_ring_tests(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, + "ib ring test failed (%d).\n", r); + r = -EAGAIN; + tmp_adev->asic_reset_res = r; + goto end; + } + } + } + +end: + return r; +} + +static struct amdgpu_reset_handler aldebaran_mode2_handler = { + .reset_method = AMD_RESET_METHOD_MODE2, + .prepare_env = NULL, + .prepare_hwcontext = aldebaran_mode2_prepare_hwcontext, + .perform_reset = aldebaran_mode2_perform_reset, + .restore_hwcontext = aldebaran_mode2_restore_hwcontext, + .restore_env = NULL, + .do_reset = aldebaran_mode2_reset, +}; + +int aldebaran_reset_init(struct amdgpu_device *adev) +{ + struct amdgpu_reset_control *reset_ctl; + + reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL); + if (!reset_ctl) + return -ENOMEM; + + reset_ctl->handle = adev; + reset_ctl->async_reset = aldebaran_async_reset; + reset_ctl->active_reset = AMD_RESET_METHOD_NONE; + reset_ctl->get_reset_handler = aldebaran_get_reset_handler; + + INIT_LIST_HEAD(&reset_ctl->reset_handlers); + INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset); + /* Only mode2 is handled through reset control now */ + amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler); + + adev->reset_cntl = reset_ctl; + + return 0; +} + +int aldebaran_reset_fini(struct amdgpu_device *adev) +{ + kfree(adev->reset_cntl); + adev->reset_cntl = NULL; + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h new file mode 100644 index 000000000000..a07db5454d49 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.h @@ -0,0 +1,32 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
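The aldebaran.c file added above is the driver's first user of the new amdgpu_reset_control framework: aldebaran_reset_init() registers a handler with the controller, and aldebaran_get_reset_handler() picks one by matching the requested reset method, falling back to mode2 when the GPU is XGMI-connected to the CPU. A minimal, self-contained C sketch of that registration-and-lookup pattern follows, assuming simplified stand-in types (the kernel's real amdgpu_reset_* structures use list_head lists and carry more state):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for amd_reset_method / amdgpu_reset_handler /
 * amdgpu_reset_control; illustrative only, not the kernel definitions. */
enum reset_method { METHOD_NONE = -1, METHOD_MODE1, METHOD_MODE2 };

struct reset_handler {
	enum reset_method method;
	int (*do_reset)(void *adev);
};

struct reset_control {
	const struct reset_handler *handlers[4];
	int num_handlers;
	bool connected_to_cpu;	/* stands in for adev->gmc.xgmi.connected_to_cpu */
};

/* Mirrors the logic of aldebaran_get_reset_handler(): honor an explicitly
 * requested method first, then fall back to mode2 for CPU-connected parts. */
static const struct reset_handler *
get_reset_handler(const struct reset_control *ctl, enum reset_method wanted)
{
	int i;

	if (wanted != METHOD_NONE)
		for (i = 0; i < ctl->num_handlers; i++)
			if (ctl->handlers[i]->method == wanted)
				return ctl->handlers[i];

	if (ctl->connected_to_cpu)
		for (i = 0; i < ctl->num_handlers; i++)
			if (ctl->handlers[i]->method == METHOD_MODE2)
				return ctl->handlers[i];

	return NULL;	/* caller falls back to the legacy reset path */
}

static int mode2_reset(void *adev)
{
	(void)adev;
	puts("mode2 reset");
	return 0;
}

static const struct reset_handler mode2_handler = {
	.method = METHOD_MODE2,
	.do_reset = mode2_reset,
};

int main(void)
{
	struct reset_control ctl = { .connected_to_cpu = true };
	const struct reset_handler *h;

	/* cf. amdgpu_reset_add_handler() in aldebaran_reset_init() */
	ctl.handlers[ctl.num_handlers++] = &mode2_handler;

	h = get_reset_handler(&ctl, METHOD_NONE);
	return h ? h->do_reset(NULL) : 1;
}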
+ * + */ + +#ifndef __ALDEBARAN_H__ +#define __ALDEBARAN_H__ + +#include "amdgpu.h" + +int aldebaran_reset_init(struct amdgpu_device *adev); +int aldebaran_reset_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c index a786d159e5e9..28e6c9ab8767 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,32 +21,34 @@ * */ #include "amdgpu.h" -#include "nv.h" +#include "soc15.h" #include "soc15_common.h" -#include "navi12_ip_offset.h" +#include "aldebaran_ip_offset.h" -int navi12_reg_base_init(struct amdgpu_device *adev) +int aldebaran_reg_base_init(struct amdgpu_device *adev) { - /* HW has more IP blocks, only initialized the blocks needed by driver */ + /* HW has more IP blocks, only initialize the block needed by our driver */ uint32_t i; for (i = 0 ; i < MAX_INSTANCE ; ++i) { adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i])); + adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i])); + adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i])); + adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); + adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0c229a92a24b..b85b67a88a3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -28,6 +28,18 @@ #ifndef __AMDGPU_H__ #define __AMDGPU_H__ +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "amdgpu: " fmt + +#ifdef dev_fmt +#undef dev_fmt +#endif + +#define dev_fmt(fmt) "amdgpu: " fmt + #include "amdgpu_ctx.h" #include <linux/atomic.h> @@ -37,11 +49,12 @@ #include <linux/rbtree.h> #include
<linux/hashtable.h> #include <linux/dma-fence.h> +#include <linux/pci.h> +#include <linux/aer.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_execbuf_util.h> #include <drm/amdgpu_drm.h> @@ -69,11 +82,13 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "amdgpu_vcn.h" +#include "amdgpu_jpeg.h" #include "amdgpu_mn.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" #include "amdgpu_sdma.h" #include "amdgpu_nbio.h" +#include "amdgpu_hdp.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_csa.h" @@ -89,6 +104,11 @@ #include "amdgpu_mes.h" #include "amdgpu_umc.h" #include "amdgpu_mmhub.h" +#include "amdgpu_gfxhub.h" +#include "amdgpu_df.h" +#include "amdgpu_smuio.h" +#include "amdgpu_fdinfo.h" +#include "amdgpu_mca.h" #define MAX_GPU_INSTANCE 16 @@ -105,6 +125,23 @@ struct amdgpu_mgpu_info uint32_t num_gpu; uint32_t num_dgpu; uint32_t num_apu; + + /* delayed reset_func for XGMI configuration if necessary */ + struct delayed_work delayed_reset_work; + bool pending_reset; +}; + +enum amdgpu_ss { + AMDGPU_SS_DRV_LOAD, + AMDGPU_SS_DEV_D0, + AMDGPU_SS_DEV_D3, + AMDGPU_SS_DRV_UNLOAD +}; + +struct amdgpu_watchdog_timer +{ + bool timeout_fatal_disable; + uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ }; #define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 @@ -158,29 +195,45 @@ extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; +extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; +extern uint amdgpu_freesync_vid_mode; +extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dm_abm_level; +extern int amdgpu_backlight; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; +extern int amdgpu_bad_page_threshold; +extern bool amdgpu_ignore_bad_page_threshold; +extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer; extern int amdgpu_async_gfx_ring; extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; +extern int amdgpu_smartshift_bias; #ifdef CONFIG_HSA_AMD extern int sched_policy; +extern bool debug_evictions; +extern bool no_system_mem_limit; #else -static const int sched_policy = KFD_SCHED_POLICY_HWS; +static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS; +static const bool __maybe_unused debug_evictions; /* = false */ +static const bool __maybe_unused no_system_mem_limit; #endif +extern int amdgpu_tmz; +extern int amdgpu_reset_method; + #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; #endif #ifdef CONFIG_DRM_AMDGPU_CIK extern int amdgpu_cik_support; #endif +extern int amdgpu_num_kcq; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) @@ -188,12 +241,12 @@ extern int amdgpu_cik_support; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) -/* AMDGPU_IB_POOL_SIZE must be a power of 2 */ -#define AMDGPU_IB_POOL_SIZE 16 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 #define AMDGPUFB_CONN_LIMIT 4 #define AMDGPU_BIOS_NUM_SCRATCH 16 +#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */ + /* hard reset data */ #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b @@ -218,6 +271,10 @@ extern int amdgpu_cik_support; #define CIK_CURSOR_WIDTH 
128 #define CIK_CURSOR_HEIGHT 128 +/* smart shift bias level limits */ +#define AMDGPU_SMARTSHIFT_MAX_BIAS (100) +#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) + struct amdgpu_device; struct amdgpu_ib; struct amdgpu_cs_parser; @@ -225,8 +282,10 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; -struct amdgpu_atif; struct kfd_vm_fault_info; +struct amdgpu_hive_info; +struct amdgpu_reset_context; +struct amdgpu_reset_control; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0, @@ -257,7 +316,7 @@ enum amdgpu_kiq_irq { #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ -#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */ +#define MAX_KIQ_REG_TRY 1000 int amdgpu_device_ip_set_clockgating_state(void *dev, enum amd_ip_block_type block_type, @@ -437,7 +496,9 @@ struct amdgpu_fpriv { int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, struct amdgpu_ib *ib); + unsigned size, + enum amdgpu_ib_pool_type pool, + struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct dma_fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, @@ -510,7 +571,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, /* * Writeback */ -#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */ +#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */ struct amdgpu_wb { struct amdgpu_bo *wb_obj; @@ -543,11 +604,33 @@ struct amdgpu_allowed_register_entry { }; enum amd_reset_method { + AMD_RESET_METHOD_NONE = -1, AMD_RESET_METHOD_LEGACY = 0, AMD_RESET_METHOD_MODE0, AMD_RESET_METHOD_MODE1, AMD_RESET_METHOD_MODE2, - AMD_RESET_METHOD_BACO + AMD_RESET_METHOD_BACO, + AMD_RESET_METHOD_PCI, +}; + +struct amdgpu_video_codec_info { + u32 codec_type; + u32 max_width; + u32 max_height; + u32 max_pixels_per_frame; + u32 max_level; +}; + +#define codec_info_build(type, width, height, level) \ + .codec_type = type,\ + .max_width = width,\ + .max_height = height,\ + .max_pixels_per_frame = height * width,\ + .max_level = level, + +struct amdgpu_video_codecs { + const u32 codec_count; + const struct amdgpu_video_codec_info *codec_array; }; /* @@ -588,6 +671,15 @@ struct amdgpu_asic_funcs { bool (*need_reset_on_init)(struct amdgpu_device *adev); /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); + /* device supports BACO */ + bool (*supports_baco)(struct amdgpu_device *adev); + /* pre asic_init quirks */ + void (*pre_asic_init)(struct amdgpu_device *adev); + /* enter/exit umd stable pstate */ + int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter); + /* query video codecs */ + int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, + const struct amdgpu_video_codecs **codecs); }; /* @@ -611,35 +703,6 @@ struct amdgpu_vram_scratch { }; /* - * ACPI - */ -struct amdgpu_atcs_functions { - bool get_ext_state; - bool pcie_perf_req; - bool pcie_dev_rdy; - bool pcie_bus_width; -}; - -struct amdgpu_atcs { - struct amdgpu_atcs_functions functions; -}; - -/* - * Firmware VRAM reservation - */ -struct amdgpu_fw_vram_usage { - u64 start_offset; - u64 size; - struct amdgpu_bo *reserved_bo; - void *va; - - /* Offset on the top of VRAM, used as c2p write buffer.
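A usage note on the codec_info_build() macro introduced in the amdgpu.h hunk above: it expands to a comma-terminated run of designated initializers, so each invocation must be wrapped in its own braces inside an amdgpu_video_codec_info array. A small self-contained sketch of that pattern, assuming an illustrative codec id (the real AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_* values live in the amdgpu_drm.h UAPI header):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct amdgpu_video_codec_info {
	u32 codec_type;
	u32 max_width;
	u32 max_height;
	u32 max_pixels_per_frame;
	u32 max_level;
};

/* Same shape as the macro added above. */
#define codec_info_build(type, width, height, level) \
	.codec_type = type,\
	.max_width = width,\
	.max_height = height,\
	.max_pixels_per_frame = height * width,\
	.max_level = level,

#define EXAMPLE_CODEC_IDX_HEVC 4	/* illustrative id, not the UAPI constant */

/* Each entry is brace-wrapped because the macro supplies only the
 * initializer list, including the trailing comma. */
static const struct amdgpu_video_codec_info example_decode_array[] = {
	{ codec_info_build(EXAMPLE_CODEC_IDX_HEVC, 8192, 4352, 186) },
};

int main(void)
{
	printf("HEVC decode: %ux%u, %u pixels/frame, level %u\n",
	       example_decode_array[0].max_width,
	       example_decode_array[0].max_height,
	       example_decode_array[0].max_pixels_per_frame,
	       example_decode_array[0].max_level);
	return 0;
}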
- */ - u64 mem_train_fb_loc; - bool mem_train_support; -}; - -/* * CGS */ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); @@ -662,29 +725,6 @@ struct amdgpu_mmio_remap { resource_size_t bus_addr; }; -struct amdgpu_df_funcs { - void (*sw_init)(struct amdgpu_device *adev); - void (*sw_fini)(struct amdgpu_device *adev); - void (*enable_broadcast_mode)(struct amdgpu_device *adev, - bool enable); - u32 (*get_fb_channel_number)(struct amdgpu_device *adev); - u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, - bool enable); - void (*get_clockgating_state)(struct amdgpu_device *adev, - u32 *flags); - void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, - bool enable); - int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, - int is_enable); - int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, - int is_disable); - void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, - uint64_t *count); - uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); - void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, - uint32_t ficadl_val, uint32_t ficadh_val); -}; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { GC_HWIP = 1, @@ -704,6 +744,8 @@ enum amd_hw_ip_block_type { MP1_HWIP, UVD_HWIP, VCN_HWIP = UVD_HWIP, + JPEG_HWIP = VCN_HWIP, + VCN1_HWIP, VCE_HWIP, DF_HWIP, DCE_HWIP, @@ -715,33 +757,78 @@ enum amd_hw_ip_block_type { CLK_HWIP, UMC_HWIP, RSMU_HWIP, + XGMI_HWIP, + DCI_HWIP, MAX_HWIP }; -#define HWIP_MAX_INSTANCE 8 +#define HWIP_MAX_INSTANCE 10 + +#define HW_ID_MAX 300 +#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv)) struct amd_powerplay { void *pp_handle; const struct amd_pm_funcs *pp_funcs; }; +/* polaris10 kickers */ +#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ + ((rid == 0xE3) || \ + (rid == 0xE4) || \ + (rid == 0xE5) || \ + (rid == 0xE7) || \ + (rid == 0xEF))) || \ + ((did == 0x6FDF) && \ + ((rid == 0xE7) || \ + (rid == 0xEF) || \ + (rid == 0xFF)))) + +#define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \ + ((rid == 0xE1) || \ + (rid == 0xF7))) + +/* polaris11 kickers */ +#define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \ + ((rid == 0xE0) || \ + (rid == 0xE5))) || \ + ((did == 0x67FF) && \ + ((rid == 0xCF) || \ + (rid == 0xEF) || \ + (rid == 0xFF)))) + +#define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \ + ((rid == 0xE2))) + +/* polaris12 kickers */ +#define ASICID_IS_P23(did, rid) (((did == 0x6987) && \ + ((rid == 0xC0) || \ + (rid == 0xC1) || \ + (rid == 0xC3) || \ + (rid == 0xC7))) || \ + ((did == 0x6981) && \ + ((rid == 0x00) || \ + (rid == 0x01) || \ + (rid == 0x10)))) + #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 struct amdgpu_device { struct device *dev; - struct drm_device *ddev; struct pci_dev *pdev; + struct drm_device ddev; #ifdef CONFIG_DRM_AMD_ACP struct amdgpu_acp acp; #endif - + struct amdgpu_hive_info *hive; /* ASIC */ enum amd_asic_type asic_type; uint32_t family; uint32_t rev_id; uint32_t external_rev_id; unsigned long flags; + unsigned long apu_flags; int usec_timeout; const struct amdgpu_asic_funcs *asic_funcs; bool shutdown; @@ -749,14 +836,8 @@ struct amdgpu_device { bool accel_working; struct notifier_block acpi_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; - struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; - unsigned debugfs_count; -#if defined(CONFIG_DEBUG_FS) - struct dentry 
*debugfs_preempt; - struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; -#endif - struct amdgpu_atif *atif; - struct amdgpu_atcs atcs; + struct debugfs_blob_wrapper debugfs_vbios_blob; + struct debugfs_blob_wrapper debugfs_discovery_blob; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -768,8 +849,6 @@ struct amdgpu_device { bool is_atom_fw; uint8_t *bios; uint32_t bios_size; - struct amdgpu_bo *stolen_vga_memory; - struct amdgpu_bo *discovery_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -812,8 +891,6 @@ struct amdgpu_device { spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; amdgpu_block_wreg_t audio_endpt_wreg; - void __iomem *rio_mem; - resource_size_t rio_mem_size; struct amdgpu_doorbell doorbell; /* clock/pll info */ @@ -848,20 +925,25 @@ struct amdgpu_device { /* display */ bool enable_virtual_display; + struct amdgpu_vkms_output *amdgpu_vkms_output; struct amdgpu_mode_info mode_info; /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ struct work_struct hotplug_work; struct amdgpu_irq_src crtc_irq; + struct amdgpu_irq_src vline0_irq; struct amdgpu_irq_src vupdate_irq; struct amdgpu_irq_src pageflip_irq; struct amdgpu_irq_src hpd_irq; + struct amdgpu_irq_src dmub_trace_irq; + struct amdgpu_irq_src dmub_outbox_irq; /* rings */ u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; - struct amdgpu_sa_manager ring_tmp_bo; + struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX]; + struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; /* interrupts */ struct amdgpu_irq irq; @@ -881,9 +963,18 @@ struct amdgpu_device { /* nbio */ struct amdgpu_nbio nbio; + /* hdp */ + struct amdgpu_hdp hdp; + + /* smuio */ + struct amdgpu_smuio smuio; + /* mmhub */ struct amdgpu_mmhub mmhub; + /* gfxhub */ + struct amdgpu_gfxhub gfxhub; + /* gfx */ struct amdgpu_gfx gfx; @@ -899,6 +990,9 @@ struct amdgpu_device { /* vcn */ struct amdgpu_vcn vcn; + /* jpeg */ + struct amdgpu_jpeg jpeg; + /* firmwares */ struct amdgpu_firmware firmware; @@ -917,14 +1011,18 @@ struct amdgpu_device { /* display related functionality */ struct amdgpu_display_manager dm; - /* discovery */ - uint8_t *discovery; - /* mes */ bool enable_mes; struct amdgpu_mes mes; + /* df */ + struct amdgpu_df df; + + /* MCA */ + struct amdgpu_mca mca; + struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; + uint32_t harvest_ip_mask; int num_ip_blocks; struct mutex mn_lock; DECLARE_HASHTABLE(mn_hash, 7); @@ -935,23 +1033,16 @@ struct amdgpu_device { atomic64_t gart_pin_size; /* soc15 register offset based on ip, instance and segment */ - uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; - - const struct amdgpu_df_funcs *df_funcs; + uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; struct amdgpu_virt virt; - /* firmware VRAM reservation */ - struct amdgpu_fw_vram_usage fw_vram_usage; /* link all shadow bo */ struct list_head shadow_list; struct mutex shadow_list_lock; - /* keep an lru list of rings by HW IP */ - struct list_head ring_lru_list; - spinlock_t ring_lru_list_lock; /* record hw reset is performed */ bool has_hw_reset; @@ -959,18 +1050,20 @@ struct amdgpu_device { /* s3/s4 mask */ bool in_suspend; + bool in_s3; + bool in_s4; + bool in_s0ix; - /* record last mm index being written through WREG32*/ - 
unsigned long last_mm_index; - bool in_gpu_reset; + atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; - struct mutex lock_reset; + struct rw_semaphore reset_sem; struct amdgpu_doorbell_index doorbell_index; struct mutex notifier_lock; int asic_reset_res; struct work_struct xgmi_reset_work; + struct list_head reset_list; long gfx_timeout; long sdma_timeout; @@ -980,57 +1073,113 @@ struct amdgpu_device { uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; - /* device pstate */ - int pstate; + /* enable runtime pm on the device */ + bool runpm; + bool in_runpm; + bool has_pr3; + + bool pm_sysfs_en; + bool ucode_sysfs_en; + + /* Chip product information */ + char product_number[16]; + char product_name[32]; + char serial[20]; + + atomic_t throttling_logging_enabled; + struct ratelimit_state throttling_logging_rs; + uint32_t ras_hw_enabled; + uint32_t ras_enabled; + + bool no_hw_access; + struct pci_saved_state *pci_state; + pci_channel_state_t pci_channel_state; + + struct amdgpu_reset_control *reset_cntl; + uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE]; }; -static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) +static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) +{ + return container_of(ddev, struct amdgpu_device, ddev); +} + +static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) +{ + return &adev->ddev; +} + +static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) { return container_of(bdev, struct amdgpu_device, mman.bdev); } int amdgpu_device_init(struct amdgpu_device *adev, - struct drm_device *ddev, - struct pci_dev *pdev, uint32_t flags); -void amdgpu_device_fini(struct amdgpu_device *adev); +void amdgpu_device_fini_hw(struct amdgpu_device *adev); +void amdgpu_device_fini_sw(struct amdgpu_device *adev); + int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write); +size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write); + void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, - uint32_t *buf, size_t size, bool write); -uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, + void *buf, size_t size, bool write); +uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags); +void amdgpu_device_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, uint32_t acc_flags); -void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, - uint32_t acc_flags); +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, + uint32_t reg, uint32_t v); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); -u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); -void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); +u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr); +u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr); +void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr, u32 reg_data); +void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr, u64 reg_data); bool amdgpu_device_asic_has_dc_support(enum 
amd_asic_type asic_type); bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); +int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context); + +int amdgpu_do_asic_reset(struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context); + int emu_soc_asic_init(struct amdgpu_device *adev); /* * Registers read & write functions. */ - -#define AMDGPU_REGS_IDX (1<<0) #define AMDGPU_REGS_NO_KIQ (1<<1) +#define AMDGPU_REGS_RLC (1<<2) -#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) -#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) +#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) +#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) + +#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg)) +#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v)) #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) -#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0) -#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX) -#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0)) -#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0) -#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX) +#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0) +#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0)) +#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) @@ -1067,9 +1216,16 @@ int emu_soc_asic_init(struct amdgpu_device *adev); tmp_ |= ((val) & ~(mask)); \ WREG32_PLL(reg, tmp_); \ } while (0) -#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) -#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) -#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) + +#define WREG32_SMC_P(_Reg, _Val, _Mask) \ + do { \ + u32 tmp = RREG32_SMC(_Reg); \ + tmp &= (_Mask); \ + tmp |= ((_Val) & ~(_Mask)); \ + WREG32_SMC(_Reg, tmp); \ + } while (0) + +#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false)) #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK @@ -1110,20 +1266,32 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) -#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) -#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) +#define amdgpu_asic_flush_hdp(adev, r) \ + ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r))) +#define amdgpu_asic_invalidate_hdp(adev, r) \ + ((adev)->asic_funcs->invalidate_hdp ? 
(adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r))) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) +#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev)) +#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev)) +#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \ + ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0) +#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c)) + #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) + /* Common functions */ +bool amdgpu_device_has_job_running(struct amdgpu_device *adev); bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); +int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, @@ -1133,9 +1301,21 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); -bool amdgpu_device_is_px(struct drm_device *dev); +int amdgpu_device_mode1_reset(struct amdgpu_device *adev); +bool amdgpu_device_supports_atpx(struct drm_device *dev); +bool amdgpu_device_supports_px(struct drm_device *dev); +bool amdgpu_device_supports_boco(struct drm_device *dev); +bool amdgpu_device_supports_smart_shift(struct drm_device *dev); +bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_device_baco_enter(struct drm_device *dev); +int amdgpu_device_baco_exit(struct drm_device *dev); + +void amdgpu_device_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -1166,20 +1346,24 @@ static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; extern const int amdgpu_max_kms_ioctl; -int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); +int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); void amdgpu_driver_unload_kms(struct drm_device *dev); void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); +void amdgpu_driver_release_kms(struct drm_device *dev); + int amdgpu_device_ip_suspend(struct amdgpu_device *adev); -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); -u32 
amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); -int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); -void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); +int amdgpu_device_resume(struct drm_device *dev, bool fbcon); +u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); +int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); +void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int amdgpu_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); /* * functions used by amdgpu_encoder.c @@ -1201,19 +1385,38 @@ struct amdgpu_afmt_acr { struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); /* amdgpu_acpi.c */ + +/* ATCS Device/Driver State */ +#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 +#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 +#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0 +#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1 + #if defined(CONFIG_ACPI) int amdgpu_acpi_init(struct amdgpu_device *adev); void amdgpu_acpi_fini(struct amdgpu_device *adev); bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); +bool amdgpu_acpi_is_power_shift_control_supported(void); int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise); +int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, + u8 dev_state, bool drv_state); +int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); -void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, - struct amdgpu_dm_backlight_caps *caps); +void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +static inline void amdgpu_acpi_detect(void) { } +static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } +static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, + u8 dev_state, bool drv_state) { return 0; } +static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, + enum amdgpu_ss ss_state) { return 0; } #endif int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, @@ -1230,20 +1433,31 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return void amdgpu_register_gpu_instance(struct amdgpu_device *adev); void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); +pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev); +pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev); +void amdgpu_pci_resume(struct pci_dev *pdev); + +bool amdgpu_device_cache_pci_state(struct pci_dev *pdev); +bool amdgpu_device_load_pci_state(struct pci_dev *pdev); + +bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev); + +int amdgpu_device_set_cg_state(struct amdgpu_device *adev, + enum amd_clockgating_state state); +int amdgpu_device_set_pg_state(struct amdgpu_device *adev, + enum amd_powergating_state state); + #include 
"amdgpu_object.h" -/* used by df_v3_6.c and amdgpu_pmu.c */ -#define AMDGPU_PMU_ATTR(_name, _object) \ -static ssize_t \ -_name##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1); \ - return sprintf(page, _object "\n"); \ -} \ - \ -static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name) +static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) +{ + return adev->gmc.tmz_enabled; +} +static inline int amdgpu_in_reset(struct amdgpu_device *adev) +{ + return atomic_read(&adev->in_gpu_reset); +} #endif - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 82155ac3288a..cc9c9f8b23b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -36,17 +36,17 @@ #include "acp_gfx_if.h" -#define ACP_TILE_ON_MASK 0x03 -#define ACP_TILE_OFF_MASK 0x02 -#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f -#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 +#define ACP_TILE_ON_MASK 0x03 +#define ACP_TILE_OFF_MASK 0x02 +#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f +#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20 -#define ACP_TILE_P1_MASK 0x3e -#define ACP_TILE_P2_MASK 0x3d -#define ACP_TILE_DSP0_MASK 0x3b -#define ACP_TILE_DSP1_MASK 0x37 +#define ACP_TILE_P1_MASK 0x3e +#define ACP_TILE_P2_MASK 0x3d +#define ACP_TILE_DSP0_MASK 0x3b +#define ACP_TILE_DSP1_MASK 0x37 -#define ACP_TILE_DSP2_MASK 0x2f +#define ACP_TILE_DSP2_MASK 0x2f #define ACP_DMA_REGS_END 0x146c0 #define ACP_I2S_PLAY_REGS_START 0x14840 @@ -75,8 +75,8 @@ #define mmACP_CONTROL 0x5131 #define mmACP_STATUS 0x5133 #define mmACP_SOFT_RESET 0x5134 -#define ACP_CONTROL__ClkEn_MASK 0x1 -#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100 +#define ACP_CONTROL__ClkEn_MASK 0x1 +#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100 #define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000 #define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF @@ -136,9 +136,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd) * 2. power off the acp tiles * 3. check and enter ulv state */ - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; } @@ -157,38 +155,47 @@ static int acp_poweron(struct generic_pm_domain *genpd) * 2. turn on acp clock * 3. 
power on acp tiles */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); } return 0; } -static struct device *get_mfd_cell_dev(const char *device_name, int r) +static int acp_genpd_add_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + int ret; + + ret = pm_genpd_add_device(gpd, dev); + if (ret) + dev_err(dev, "Failed to add dev to genpd %d\n", ret); + + return ret; +} + +static int acp_genpd_remove_device(struct device *dev, void *data) { - char auto_dev_name[25]; - struct device *dev; + int ret; - snprintf(auto_dev_name, sizeof(auto_dev_name), - "%s.%d.auto", device_name, r); - dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name); - dev_info(dev, "device %s added to pm domain\n", auto_dev_name); + ret = pm_genpd_remove_device(dev); + if (ret) + dev_err(dev, "Failed to remove dev from genpd %d\n", ret); - return dev; + /* Continue to remove */ + return 0; } /** * acp_hw_init - start and test ACP block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * */ static int acp_hw_init(void *handle) { - int r, i; + int r; uint64_t acp_base; u32 val = 0; u32 count = 0; - struct device *dev; struct i2s_platform_data *i2s_pdata = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -344,15 +351,10 @@ static int acp_hw_init(void *handle) if (r) goto failure; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); - if (r) { - dev_err(dev, "Failed to add dev to genpd\n"); - goto failure; - } - } - + r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd, + acp_genpd_add_device); + if (r) + goto failure; /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); @@ -408,15 +410,13 @@ failure: /** * acp_hw_fini - stop the hardware block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * */ static int acp_hw_fini(void *handle) { - int i, ret; u32 val = 0; u32 count = 0; - struct device *dev; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ @@ -461,13 +461,8 @@ static int acp_hw_fini(void *handle) udelay(100); } - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - ret = pm_genpd_remove_device(dev); - /* If removal fails, dont giveup and try rest */ - if (ret) - dev_err(dev, "remove dev from genpd failed\n"); - } + device_for_each_child(adev->acp.parent, NULL, + acp_genpd_remove_device); mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); @@ -527,11 +522,9 @@ static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = state == AMD_PG_STATE_GATE ? 
true : false; + bool enable = (state == AMD_PG_STATE_GATE); - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 1e41367ef74e..4811b0faafd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -26,7 +26,9 @@ #include <linux/slab.h> #include <linux/power_supply.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include <acpi/video.h> +#include <acpi/actbl.h> #include <drm/drm_crtc_helper.h> #include "amdgpu.h" @@ -64,16 +66,37 @@ struct amdgpu_atif { struct amdgpu_atif_notifications notifications; struct amdgpu_atif_functions functions; struct amdgpu_atif_notification_cfg notification_cfg; - struct amdgpu_encoder *encoder_for_bl; +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + struct backlight_device *bd; +#endif struct amdgpu_dm_backlight_caps backlight_caps; }; +struct amdgpu_atcs_functions { + bool get_ext_state; + bool pcie_perf_req; + bool pcie_dev_rdy; + bool pcie_bus_width; + bool power_shift_control; +}; + +struct amdgpu_atcs { + acpi_handle handle; + + struct amdgpu_atcs_functions functions; +}; + +static struct amdgpu_acpi_priv { + struct amdgpu_atif atif; + struct amdgpu_atcs atcs; +} amdgpu_acpi_priv; + /* Call the ATIF method */ /** * amdgpu_atif_call - call an ATIF method * - * @handle: acpi handle + * @atif: atif structure * @function: the ATIF function to execute * @params: ATIF function params * @@ -163,7 +186,6 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas /** * amdgpu_atif_verify_interface - verify ATIF * - * @handle: acpi handle * @atif: amdgpu atif struct * * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function @@ -205,40 +227,10 @@ out: return err; } -static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) -{ - acpi_handle handle = NULL; - char acpi_method_name[255] = { 0 }; - struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; - acpi_status status; - - /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only - * systems, ATIF is in the dGPU's namespace. 
- */
-	status = acpi_get_handle(dhandle, "ATIF", &handle);
-	if (ACPI_SUCCESS(status))
-		goto out;
-
-	if (amdgpu_has_atpx()) {
-		status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
-					 &handle);
-		if (ACPI_SUCCESS(status))
-			goto out;
-	}
-
-	DRM_DEBUG_DRIVER("No ATIF handle found\n");
-	return NULL;
-out:
-	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-	DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
-	return handle;
-}
-
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
- * @handle: acpi handle
- * @n: atif notification configuration struct
+ * @atif: amdgpu atif struct
  *
  * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
  * to determine if a notifier is used and if so which one
@@ -301,7 +293,7 @@ out:
 /**
  * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
  *
- * @handle: acpi handle
+ * @atif: amdgpu atif struct
  *
  * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
  * to determine the acceptable range of backlight values
@@ -360,7 +352,7 @@ out:
 /**
  * amdgpu_atif_get_sbios_requests - get requested sbios event
  *
- * @handle: acpi handle
+ * @atif: amdgpu atif struct
  * @req: atif sbios request struct
  *
  * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
@@ -413,7 +405,7 @@ out:
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			       struct acpi_bus_event *event)
 {
-	struct amdgpu_atif *atif = adev->atif;
+	struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
 	int count;
 
 	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -423,8 +415,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		return NOTIFY_DONE;
 
 	/* Is this actually our event? */
-	if (!atif ||
-	    !atif->notification_cfg.enabled ||
+	if (!atif->notification_cfg.enabled ||
 	    event->type != atif->notification_cfg.command_code) {
 		/* These events will generate keypresses otherwise */
 		if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
@@ -444,32 +435,28 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 
 	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-	/* todo: add DC handling */
-	if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
-	    !amdgpu_device_has_dc_support(adev)) {
-		struct amdgpu_encoder *enc = atif->encoder_for_bl;
-
-		if (enc) {
-			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
-
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+		if (atif->bd) {
 			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
 					 req.backlight_level);
-
-			amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
-
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-			backlight_force_update(dig->bl_dev,
-					       BACKLIGHT_UPDATE_HOTKEY);
-#endif
+			/*
+			 * XXX backlight_device_set_brightness() is
+			 * hardwired to post BACKLIGHT_UPDATE_SYSFS.
			 * It probably should accept a 'reason' parameter.
+			 */
+			backlight_device_set_brightness(atif->bd, req.backlight_level);
 		}
+#endif
 	}
+
 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
 		if (adev->flags & AMD_IS_PX) {
-			pm_runtime_get_sync(adev->ddev->dev);
+			pm_runtime_get_sync(adev_to_drm(adev)->dev);
 			/* Just fire off a uevent and let userspace tell us what to do */
-			drm_helper_hpd_irq_event(adev->ddev);
-			pm_runtime_mark_last_busy(adev->ddev->dev);
-			pm_runtime_put_autosuspend(adev->ddev->dev);
+			drm_helper_hpd_irq_event(adev_to_drm(adev));
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 		}
 	}
 	/* TODO: check other events */
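The reworked handler above services several SBIOS requests from a single ACPI notification by testing req.pending bit by bit. A standalone sketch of that dispatch pattern, with invented bit positions standing in for the real ATIF_* masks from amd_acpi.h:

#include <stdio.h>
#include <stdint.h>

/* Invented bit positions for illustration; the real masks live in amd_acpi.h. */
#define PANEL_BRIGHTNESS_CHANGE_REQUEST	(1u << 7)
#define DGPU_DISPLAY_EVENT		(1u << 9)

struct sbios_req {
	uint32_t pending;		/* bitmask of outstanding requests */
	uint32_t backlight_level;
};

static void handle_sbios_requests(const struct sbios_req *req)
{
	/* Each bit is tested independently; one event may carry several requests. */
	if (req->pending & PANEL_BRIGHTNESS_CHANGE_REQUEST)
		printf("set backlight to %u\n", req->backlight_level);
	if (req->pending & DGPU_DISPLAY_EVENT)
		printf("fire a hotplug uevent\n");
}

int main(void)
{
	struct sbios_req req = {
		.pending = PANEL_BRIGHTNESS_CHANGE_REQUEST | DGPU_DISPLAY_EVENT,
		.backlight_level = 128,
	};

	handle_sbios_requests(&req);
	return 0;
}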
@@ -488,14 +475,15 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 /**
  * amdgpu_atcs_call - call an ATCS method
  *
- * @handle: acpi handle
+ * @atcs: amdgpu atcs struct
  * @function: the ATCS function to execute
  * @params: ATCS function params
  *
  * Executes the requested ATCS function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
+					   int function,
 					   struct acpi_buffer *params)
 {
 	acpi_status status;
@@ -519,7 +507,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
 		atcs_arg_elements[1].integer.value = 0;
 	}
 
-	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+	status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
 
	/* Fail only if calling the method fails and ATCS is supported */
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -548,12 +536,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
 	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
 	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
 	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+	f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
 }
 
 /**
  * amdgpu_atcs_verify_interface - verify ATCS
  *
- * @handle: acpi handle
  * @atcs: amdgpu atcs struct
  *
  * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
@@ -561,15 +549,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atcs_verify_interface(acpi_handle handle,
-					struct amdgpu_atcs *atcs)
+static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
 {
 	union acpi_object *info;
 	struct atcs_verify_interface output;
 	size_t size;
 	int err = 0;
 
-	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
 	if (!info)
 		return -EIO;
 
@@ -606,7 +593,7 @@ out:
  */
 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
 {
-	struct amdgpu_atcs *atcs = &adev->atcs;
+	struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
 
 	if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
 		return true;
@@ -615,6 +602,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
 }
 
 /**
+ * amdgpu_acpi_is_power_shift_control_supported
+ *
+ * Check if the ATCS power shift control method
+ * is supported.
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_power_shift_control_supported(void)
+{
+	return amdgpu_acpi_priv.atcs.functions.power_shift_control;
+}
+
+/**
  * amdgpu_acpi_pcie_notify_device_ready
  *
  * @adev: amdgpu_device pointer
@@ -625,19 +624,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
  */
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
 {
-	acpi_handle handle;
 	union acpi_object *info;
-	struct amdgpu_atcs *atcs = &adev->atcs;
-
-	/* Get the device handle */
-	handle = ACPI_HANDLE(&adev->pdev->dev);
-	if (!handle)
-		return -EINVAL;
+	struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
 
 	if (!atcs->functions.pcie_dev_rdy)
 		return -EINVAL;
 
-	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+	info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
 	if (!info)
 		return -EIO;
 
@@ -660,9 +653,8 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 					 u8 perf_req, bool advertise)
 {
-	acpi_handle handle;
 	union acpi_object *info;
-	struct amdgpu_atcs *atcs = &adev->atcs;
+	struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
 	struct atcs_pref_req_input atcs_input;
 	struct atcs_pref_req_output atcs_output;
 	struct acpi_buffer params;
@@ -672,11 +664,6 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	if (amdgpu_acpi_pcie_notify_device_ready(adev))
 		return -EINVAL;
 
-	/* Get the device handle */
-	handle = ACPI_HANDLE(&adev->pdev->dev);
-	if (!handle)
-		return -EINVAL;
-
 	if (!atcs->functions.pcie_perf_req)
 		return -EINVAL;
 
@@ -694,7 +681,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	params.pointer = &atcs_input;
 
 	while (retry--) {
-		info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+		info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
 		if (!info)
 			return -EIO;
 
@@ -728,6 +715,96 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_acpi_power_shift_control
+ *
+ * @adev: amdgpu_device pointer
+ * @dev_state: device acpi state
+ * @drv_state: driver state
+ *
+ * Executes the POWER_SHIFT_CONTROL method to
+ * communicate current dGPU device state and
+ * driver state to APU/SBIOS.
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+				    u8 dev_state, bool drv_state)
+{
+	union acpi_object *info;
+	struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+	struct atcs_pwr_shift_input atcs_input;
+	struct acpi_buffer params;
+
+	if (!amdgpu_acpi_is_power_shift_control_supported())
+		return -EINVAL;
+
+	atcs_input.size = sizeof(struct atcs_pwr_shift_input);
+	/* dGPU id (bits 2-0: func num, 7-3: dev num, 15-8: bus num) */
+	atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+	atcs_input.dev_acpi_state = dev_state;
+	atcs_input.drv_state = drv_state;
+
+	params.length = sizeof(struct atcs_pwr_shift_input);
+	params.pointer = &atcs_input;
+
+	info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
+	if (!info) {
+		DRM_ERROR("ATCS PSC update failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
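amdgpu_acpi_power_shift_control() above packs the dGPU's PCI address into the 16-bit dgpu_id exactly as the comment describes: devfn already carries device and function in bits 7-3 and 2-0, and the bus number lands in bits 15-8. A self-contained check of that encoding:

#include <stdio.h>
#include <stdint.h>

/* bits 2-0: function, bits 7-3: device, bits 15-8: bus */
static uint16_t pack_dgpu_id(uint8_t bus, uint8_t dev, uint8_t fn)
{
	uint8_t devfn = (uint8_t)((dev << 3) | (fn & 0x7));	/* same layout as pdev->devfn */

	return (uint16_t)(devfn | (bus << 8));
}

int main(void)
{
	uint16_t id = pack_dgpu_id(0x03, 0x00, 0x0);	/* a dGPU at 03:00.0, say */

	printf("dgpu_id = 0x%04x (bus %u, dev %u, fn %u)\n",
	       id, id >> 8, (id >> 3) & 0x1f, id & 0x7);
	return 0;
}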
+
+/**
+ * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
+ *
+ * @dev: drm_device pointer
+ * @ss_state: current smart shift event
+ *
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+{
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	int r;
+
+	if (!amdgpu_device_supports_smart_shift(dev))
+		return 0;
+
+	switch (ss_state) {
+	/* SBIOS trigger "stop", "enable" and "start" at D0, Driver Operational.
+	 * SBIOS trigger "stop" at D3, Driver Not Operational.
+	 * SBIOS trigger "stop" and "disable" at D0, Driver Not Operational.
+	 */
+	case AMDGPU_SS_DRV_LOAD:
+		r = amdgpu_acpi_power_shift_control(adev,
+						    AMDGPU_ATCS_PSC_DEV_STATE_D0,
+						    AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+		break;
+	case AMDGPU_SS_DEV_D0:
+		r = amdgpu_acpi_power_shift_control(adev,
+						    AMDGPU_ATCS_PSC_DEV_STATE_D0,
+						    AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+		break;
+	case AMDGPU_SS_DEV_D3:
+		r = amdgpu_acpi_power_shift_control(adev,
+						    AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
+						    AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+		break;
+	case AMDGPU_SS_DRV_UNLOAD:
+		r = amdgpu_acpi_power_shift_control(adev,
+						    AMDGPU_ATCS_PSC_DEV_STATE_D0,
+						    AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return r;
+}
+
+/**
 * amdgpu_acpi_event - handle notify events
 *
 * @nb: notifier block
@@ -770,61 +847,158 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-	acpi_handle handle, atif_handle;
-	struct amdgpu_atif *atif;
-	struct amdgpu_atcs *atcs = &adev->atcs;
-	int ret;
+	struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
 
-	/* Get the device handle */
-	handle = ACPI_HANDLE(&adev->pdev->dev);
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+	if (atif->notifications.brightness_change) {
+		if (amdgpu_device_has_dc_support(adev)) {
+#if defined(CONFIG_DRM_AMD_DC)
+			struct amdgpu_display_manager *dm = &adev->dm;
+			if (dm->backlight_dev[0])
+				atif->bd = dm->backlight_dev[0];
+#endif
+		} else {
+			struct drm_encoder *tmp;
+
+			/* Find the encoder controlling the brightness */
+			list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
+					    head) {
+				struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
+
+				if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+				    enc->enc_priv) {
+					struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						atif->bd = dig->bl_dev;
+						break;
+					}
+				}
+			}
+		}
+	}
+#endif
+	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+	register_acpi_notifier(&adev->acpi_nb);
 
-	if (!adev->bios || !handle)
-		return 0;
+	return 0;
+}
 
-	/* Call the ATCS method */
-	ret = amdgpu_atcs_verify_interface(handle, atcs);
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
+{
+	struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+	caps->caps_valid = atif->backlight_caps.caps_valid;
+	caps->min_input_signal = atif->backlight_caps.min_input_signal;
+	caps->max_input_signal = atif->backlight_caps.max_input_signal;
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+	unregister_acpi_notifier(&adev->acpi_nb);
+}
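The switch in amdgpu_acpi_smart_shift_update() above boils down to a four-entry table from driver event to (device state, driver state) pair. A compilable restatement of just that mapping, with the enum order assumed for illustration:

#include <stdio.h>

enum amdgpu_ss { SS_DRV_LOAD, SS_DEV_D0, SS_DEV_D3, SS_DRV_UNLOAD };	/* assumed order */

struct psc_args {
	const char *dev_state;
	const char *drv_state;
};

/* Same mapping as the switch above. */
static const struct psc_args psc_map[] = {
	[SS_DRV_LOAD]	= { "D0",	"operational" },
	[SS_DEV_D0]	= { "D0",	"operational" },
	[SS_DEV_D3]	= { "D3_HOT",	"not operational" },
	[SS_DRV_UNLOAD]	= { "D0",	"not operational" },
};

int main(void)
{
	for (int i = SS_DRV_LOAD; i <= SS_DRV_UNLOAD; i++)
		printf("event %d -> dev %s, drv %s\n",
		       i, psc_map[i].dev_state, psc_map[i].drv_state);
	return 0;
}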
+
+/**
+ * amdgpu_atif_pci_probe_handle - look up the ATIF handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATIF handle (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+	acpi_handle dhandle, atif_handle;
+	acpi_status status;
+	int ret;
+
+	dhandle = ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	amdgpu_acpi_priv.atif.handle = atif_handle;
+	acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
+	DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+	ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+		amdgpu_acpi_priv.atif.handle = NULL;
+		return false;
 	}
+	return true;
+}
 
-	/* Probe for ATIF, and initialize it if found */
-	atif_handle = amdgpu_atif_probe_handle(handle);
-	if (!atif_handle)
-		goto out;
+/**
+ * amdgpu_atcs_pci_probe_handle - look up the ATCS handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATCS handle (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+	acpi_handle dhandle, atcs_handle;
+	acpi_status status;
+	int ret;
 
-	atif = kzalloc(sizeof(*atif), GFP_KERNEL);
-	if (!atif) {
-		DRM_WARN("Not enough memory to initialize ATIF\n");
-		goto out;
-	}
-	atif->handle = atif_handle;
+	dhandle = ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
 
-	/* Call the ATIF method */
-	ret = amdgpu_atif_verify_interface(atif);
+	status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	amdgpu_acpi_priv.atcs.handle = atcs_handle;
+	acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
+	DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
+	ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
-		kfree(atif);
-		goto out;
+		amdgpu_acpi_priv.atcs.handle = NULL;
+		return false;
 	}
-	adev->atif = atif;
+	return true;
+}
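amdgpu_acpi_detect() below has to walk every VGA- and display-class PCI device because the ATIF/ATCS methods may sit in either GPU's namespace on hybrid systems. The pci_get_class() iterator drops the reference on the device it was handed and takes one on the device it returns, so the canonical loop needs no explicit pci_dev_put(); a kernel-style sketch of that idiom (not a standalone program):

/* Scan one PCI display class, probing every device found. */
static void scan_display_class(unsigned int class, bool (*probe)(struct pci_dev *))
{
	struct pci_dev *pdev = NULL;

	/*
	 * pci_get_class() releases the reference on 'pdev' and returns the
	 * next match with a fresh reference, so no pci_dev_put() is needed;
	 * the terminating NULL return leaves no reference held.
	 */
	while ((pdev = pci_get_class(class << 8, pdev)) != NULL)
		probe(pdev);
}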
-	if (atif->notifications.brightness_change) {
-		struct drm_encoder *tmp;
-
-		/* Find the encoder controlling the brightness */
-		list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
-				    head) {
-			struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
-
-			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
-			    enc->enc_priv) {
-				struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
-				if (dig->bl_dev) {
-					atif->encoder_for_bl = enc;
-					break;
-				}
-			}
-		}
+/*
+ * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+ *
+ * Check if we have the ATIF/ATCS methods and populate
+ * the structures in the driver.
+ */
+void amdgpu_acpi_detect(void)
+{
+	struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+	struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+	struct pci_dev *pdev = NULL;
+	int ret;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		if (!atif->handle)
+			amdgpu_atif_pci_probe_handle(pdev);
+		if (!atcs->handle)
+			amdgpu_atcs_pci_probe_handle(pdev);
+	}
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+		if (!atif->handle)
+			amdgpu_atif_pci_probe_handle(pdev);
+		if (!atcs->handle)
+			amdgpu_atcs_pci_probe_handle(pdev);
 	}
 
 	if (atif->functions.sbios_requests && !atif->functions.system_params) {
@@ -855,35 +1029,22 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 	} else {
 		atif->backlight_caps.caps_valid = false;
 	}
-
-out:
-	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
-	register_acpi_notifier(&adev->acpi_nb);
-
-	return ret;
-}
-
-void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
-				    struct amdgpu_dm_backlight_caps *caps)
-{
-	if (!adev->atif) {
-		caps->caps_valid = false;
-		return;
-	}
-	caps->caps_valid = adev->atif->backlight_caps.caps_valid;
-	caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
-	caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
 }
 
 /**
- * amdgpu_acpi_fini - tear down driver acpi support
+ * amdgpu_acpi_is_s0ix_active
  *
  * @adev: amdgpu_device pointer
  *
- * Unregisters with the acpi notifier chain (all asics).
+ * returns true if supported, false if not.
  */
-void amdgpu_acpi_fini(struct amdgpu_device *adev)
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-	unregister_acpi_notifier(&adev->acpi_nb);
-	kfree(adev->atif);
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
+	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+		if (adev->flags & AMD_IS_APU)
+			return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+	}
+#endif
+	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d3da9dde4ee1..7077f21f0021 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -21,6 +21,7 @@
  */
 
 #include "amdgpu_amdkfd.h"
+#include "amd_pcie.h"
 #include "amd_shared.h"
 #include "amdgpu.h"
@@ -29,44 +30,49 @@
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
-
-static const unsigned int compute_vmid_bitmap = 0xFF00;
+#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 
 /* Total memory size in system memory and all GPU VRAM.
Used to * estimate worst case amount of memory to reserve for page tables */ uint64_t amdgpu_amdkfd_total_mem_size; +static bool kfd_initialized; + int amdgpu_amdkfd_init(void) { struct sysinfo si; int ret; si_meminfo(&si); - amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh; + amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh; amdgpu_amdkfd_total_mem_size *= si.mem_unit; -#ifdef CONFIG_HSA_AMD ret = kgd2kfd_init(); amdgpu_amdkfd_gpuvm_init_mem_limits(); -#else - ret = -ENOENT; -#endif + kfd_initialized = !ret; return ret; } void amdgpu_amdkfd_fini(void) { - kgd2kfd_exit(); + if (kfd_initialized) { + kgd2kfd_exit(); + kfd_initialized = false; + } } void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { bool vf = amdgpu_sriov_vf(adev); - adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, - adev->pdev, adev->asic_type, vf); + if (!kfd_initialized) + return; + + adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -112,13 +118,15 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) if (adev->kfd.dev) { struct kgd2kfd_shared_resources gpu_resources = { - .compute_vmid_bitmap = compute_vmid_bitmap, + .compute_vmid_bitmap = + ((1 << AMDGPU_NUM_VMID) - 1) - + ((1 << adev->vm_manager.first_kfd_vmid) - 1), .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec, .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe, .gpuvm_size = min(adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GMC_HOLE_START), - .drm_render_minor = adev->ddev->render->index, + .drm_render_minor = adev_to_drm(adev)->render->index, .sdma_doorbell_idx = adev->doorbell_index.sdma_engine, }; @@ -126,7 +134,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) /* this is going to have a few of the MSBs set that we need to * clear */ - bitmap_complement(gpu_resources.queue_bitmap, + bitmap_complement(gpu_resources.cp_queue_bitmap, adev->gfx.mec.queue_bitmap, KGD_MAX_QUEUES); @@ -137,7 +145,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i) - clear_bit(i, gpu_resources.queue_bitmap); + clear_bit(i, gpu_resources.cp_queue_bitmap); amdgpu_doorbell_get_kfd_info(adev, &gpu_resources.doorbell_physical_address, @@ -159,11 +167,12 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->doorbell_index.last_non_cp; } - kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources); + adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev, + adev_to_drm(adev), &gpu_resources); } } -void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev) +void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev) { if (adev->kfd.dev) { kgd2kfd_device_exit(adev->kfd.dev); @@ -178,18 +187,28 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry); } -void amdgpu_amdkfd_suspend(struct amdgpu_device *adev) +void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm) { if (adev->kfd.dev) - kgd2kfd_suspend(adev->kfd.dev); + kgd2kfd_suspend(adev->kfd.dev, run_pm); } -int amdgpu_amdkfd_resume(struct amdgpu_device *adev) +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev) { int r = 0; if (adev->kfd.dev) - r = kgd2kfd_resume(adev->kfd.dev); + r = kgd2kfd_resume_iommu(adev->kfd.dev); + + return r; +} + +int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) +{ + int r = 0; + + if (adev->kfd.dev) + r = 
kgd2kfd_resume(adev->kfd.dev, run_pm); return r; } @@ -224,7 +243,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr, bool mqd_gfx9) + void **cpu_ptr, bool cp_mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -239,9 +258,10 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); - if (mqd_gfx9) - bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; + if (cp_mqd_gfx9) + bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9; r = amdgpu_bo_create(adev, &bp, &bo); if (r) { @@ -310,6 +330,7 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; + struct amdgpu_bo_user *ubo; struct amdgpu_bo_param bp; int r; @@ -320,14 +341,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; bp.type = ttm_bo_type_device; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); - r = amdgpu_bo_create(adev, &bp, &bo); + r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) { dev_err(adev->dev, "failed to allocate gws BO for amdkfd (%d)\n", r); return r; } + bo = &ubo->bo; *mem_obj = bo; return 0; } @@ -380,29 +403,23 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask : - ~((1ULL << 32) - 1); - resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size; memset(mem_info, 0, sizeof(*mem_info)); - if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) { - mem_info->local_mem_size_public = adev->gmc.visible_vram_size; - mem_info->local_mem_size_private = adev->gmc.real_vram_size - - adev->gmc.visible_vram_size; - } else { - mem_info->local_mem_size_public = 0; - mem_info->local_mem_size_private = adev->gmc.real_vram_size; - } + + mem_info->local_mem_size_public = adev->gmc.visible_vram_size; + mem_info->local_mem_size_private = adev->gmc.real_vram_size - + adev->gmc.visible_vram_size; + mem_info->vram_width = adev->gmc.vram_width; - pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n", - &adev->gmc.aper_base, &aper_limit, + pr_debug("Address base: %pap public 0x%llx private 0x%llx\n", + &adev->gmc.aper_base, mem_info->local_mem_size_public, mem_info->local_mem_size_private); if (amdgpu_sriov_vf(adev)) mem_info->mem_clk_max = adev->clock.default_mclk / 100; - else if (adev->powerplay.pp_funcs) { + else if (adev->pm.dpm_enabled) { if (amdgpu_emu_mode == 1) mem_info->mem_clk_max = 0; else @@ -427,7 +444,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) /* the sclk is in quantas of 10kHz */ if (amdgpu_sriov_vf(adev)) return adev->clock.default_sclk / 100; - else if (adev->powerplay.pp_funcs) + else if (adev->pm.dpm_enabled) return amdgpu_dpm_get_sclk(adev, false) / 100; else return 100; @@ -478,11 +495,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, goto out_put; obj = dma_buf->priv; - if (obj->dev->driver != adev->ddev->driver) + if (obj->dev->driver != adev_to_drm(adev)->driver) /* Can't handle buffers from different drivers */ goto out_put; - adev = obj->dev->dev_private; + adev = 
drm_to_adev(obj->dev);
 	bo = gem_to_amdgpu_bo(obj);
 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
 				       AMDGPU_GEM_DOMAIN_GTT)))
@@ -494,17 +511,16 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 	*dma_buf_kgd = (struct kgd_dev *)adev;
 	if (bo_size)
 		*bo_size = amdgpu_bo_size(bo);
-	if (metadata_size)
-		*metadata_size = bo->metadata_size;
 	if (metadata_buffer)
 		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
 					   metadata_size, &metadata_flags);
 	if (flags) {
 		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-				ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;
 
 		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
+			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
 	}
 
 out_put:
@@ -515,8 +531,9 @@ out_put:
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+	return amdgpu_vram_mgr_usage(vram_man);
 }
 
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
@@ -525,6 +542,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
 	return adev->gmc.xgmi.hive_id;
 }
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	return adev->unique_id;
+}
+
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
 {
 	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -540,6 +565,88 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 	return (uint8_t)ret;
 }
 
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
+	int num_links;
+
+	if (adev->asic_type != CHIP_ALDEBARAN)
+		return 0;
+
+	if (src)
+		peer_adev = (struct amdgpu_device *)src;
+
+	/* num links returns 0 for indirect peers since indirect route is unknown. */
+	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
+	if (num_links < 0) {
+		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
+			  adev->gmc.xgmi.physical_node_id,
+			  peer_adev->gmc.xgmi.physical_node_id, num_links);
+		num_links = 0;
+	}
+
+	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
+	return (num_links * 16 * 25000) / BITS_PER_BYTE;
+}
+
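Both bandwidth helpers reduce to lane count times per-lane rate in Mbps, divided by eight to get MB/s. For the fixed Aldebaran assumption above, one x16 link at 25000 Mbps per lane comes to 50000 MB/s; a standalone check of the arithmetic:

#include <stdio.h>

#define BITS_PER_BYTE 8

/* Mirrors the xGMI math above: x16 link, 25000 Mbps per lane. */
static int xgmi_bandwidth_mbytes(int num_links)
{
	return (num_links * 16 * 25000) / BITS_PER_BYTE;
}

/* Mirrors the PCIe math below: lane count times per-lane rate in Mbps. */
static int pcie_bandwidth_mbytes(int num_lanes, int gen_speed_mbits)
{
	return (num_lanes * gen_speed_mbits) / BITS_PER_BYTE;
}

int main(void)
{
	printf("one xGMI link: %d MB/s\n", xgmi_bandwidth_mbytes(1));		/* 50000 */
	printf("PCIe gen4 x16: %d MB/s\n", pcie_bandwidth_mbytes(16, 16000));	/* 32000 */
	return 0;
}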
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)dev;
+	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
+					fls(adev->pm.pcie_mlw_mask)) - 1;
+	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
+					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
+					fls(adev->pm.pcie_gen_mask &
+					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
+	uint32_t num_lanes_mask = 1 << num_lanes_shift;
+	uint32_t gen_speed_mask = 1 << gen_speed_shift;
+	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
+
+	switch (num_lanes_mask) {
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
+		num_lanes_factor = 1;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
+		num_lanes_factor = 2;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
+		num_lanes_factor = 4;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
+		num_lanes_factor = 8;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
+		num_lanes_factor = 12;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
+		num_lanes_factor = 16;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
+		num_lanes_factor = 32;
+		break;
+	}
+
+	switch (gen_speed_mask) {
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
+		gen_speed_mbits_factor = 2500;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
+		gen_speed_mbits_factor = 5000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
+		gen_speed_mbits_factor = 8000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
+		gen_speed_mbits_factor = 16000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
+		gen_speed_mbits_factor = 32000;
+		break;
+	}
+
+	return (num_lanes_factor * gen_speed_mbits_factor) / BITS_PER_BYTE;
+}
+
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -554,6 +661,20 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
 	return adev->gds.gws_size;
 }
 
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	return adev->rev_id;
+}
+
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	return adev->gmc.noretry;
+}
+
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len)
@@ -595,6 +716,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 	job->vmid = vmid;
 
 	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+
 	if (ret) {
 		DRM_ERROR("amdgpu: failed to schedule IB.\n");
 		goto err_ib_sched;
@@ -603,7 +725,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 	ret = dma_fence_wait(f, false);
 
 err_ib_sched:
-	dma_fence_put(f);
 	amdgpu_job_free(job);
 err:
 	return ret;
@@ -613,104 +734,62 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	if (is_support_sw_smu(adev))
-		smu_switch_power_profile(&adev->smu,
-					 PP_SMC_POWER_PROFILE_COMPUTE,
-					 !idle);
-	else if (adev->powerplay.pp_funcs &&
-		 adev->powerplay.pp_funcs->switch_power_profile)
-		amdgpu_dpm_switch_power_profile(adev,
-						PP_SMC_POWER_PROFILE_COMPUTE,
-						!idle);
+	amdgpu_dpm_switch_power_profile(adev,
+					PP_SMC_POWER_PROFILE_COMPUTE,
+					!idle);
 }
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
-	if (adev->kfd.dev) {
-		if ((1 << vmid) & compute_vmid_bitmap)
-			return true;
-	}
+	if (adev->kfd.dev)
+		return vmid >= adev->vm_manager.first_kfd_vmid;
 
 	return false;
 }
 
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	return adev->have_atomics_support;
-}
-
-#ifndef CONFIG_HSA_AMD
-bool
amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) -{ - return false; -} - -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) -{ -} - -void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, - struct amdgpu_vm *vm) -{ -} + if (adev->family == AMDGPU_FAMILY_AI) { + int i; -struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f) -{ - return NULL; -} + for (i = 0; i < adev->num_vmhubs; i++) + amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); + } else { + amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); + } -int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) -{ return 0; } -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - unsigned int asic_type, bool vf) -{ - return NULL; -} - -bool kgd2kfd_device_init(struct kfd_dev *kfd, - struct drm_device *ddev, - const struct kgd2kfd_shared_resources *gpu_resources) -{ - return false; -} - -void kgd2kfd_device_exit(struct kfd_dev *kfd) -{ -} - -void kgd2kfd_exit(void) +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, + enum TLB_FLUSH_TYPE flush_type) { -} + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + bool all_hub = false; -void kgd2kfd_suspend(struct kfd_dev *kfd) -{ -} + if (adev->family == AMDGPU_FAMILY_AI) + all_hub = true; -int kgd2kfd_resume(struct kfd_dev *kfd) -{ - return 0; + return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); } -int kgd2kfd_pre_reset(struct kfd_dev *kfd) +bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd) { - return 0; -} + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; -int kgd2kfd_post_reset(struct kfd_dev *kfd) -{ - return 0; + return adev->have_atomics_support; } -void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) +void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd) { -} + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct ras_err_data err_data = {0, 0, 0, NULL}; -void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) -{ + /* CPU MCA will handle page retirement if connected_to_cpu is 1 */ + if (!adev->gmc.xgmi.connected_to_cpu) + amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL); + else + amdgpu_amdkfd_gpu_reset(kgd); } -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 069d5d230810..751557af09bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/mm.h> +#include <linux/kthread.h> #include <linux/workqueue.h> #include <kgd_kfd_interface.h> #include <drm/ttm/ttm_execbuf_util.h> @@ -35,13 +36,26 @@ extern uint64_t amdgpu_amdkfd_total_mem_size; +enum TLB_FLUSH_TYPE { + TLB_FLUSH_LEGACY = 0, + TLB_FLUSH_LIGHTWEIGHT, + TLB_FLUSH_HEAVYWEIGHT +}; + struct amdgpu_device; -struct kfd_bo_va_list { - struct list_head bo_list; - struct amdgpu_bo_va *bo_va; - void *kgd_dev; +enum kfd_mem_attachment_type { + KFD_MEM_ATT_SHARED, /* Share kgd_mem->bo or another attachment's */ + KFD_MEM_ATT_USERPTR, /* SG bo to DMA map pages from a userptr bo */ + KFD_MEM_ATT_DMABUF, /* DMAbuf to DMA map TTM BOs */ +}; + +struct kfd_mem_attachment { + struct list_head list; + enum kfd_mem_attachment_type type; bool is_mapped; + struct amdgpu_bo_va *bo_va; + struct amdgpu_device *adev; uint64_t va; uint64_t pte_flags; }; @@ -49,7 +63,8 @@ struct kfd_bo_va_list { struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; - struct list_head 
bo_va_list; + struct dma_buf *dmabuf; + struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct ttm_validate_buffer validate_list; struct ttm_validate_buffer resv_list; @@ -65,6 +80,7 @@ struct kgd_mem { struct amdgpu_sync sync; bool aql_queue; + bool is_imported; }; /* KFD Memory Eviction */ @@ -73,11 +89,13 @@ struct amdgpu_amdkfd_fence { struct mm_struct *mm; spinlock_t lock; char timeline_name[TASK_COMM_LEN]; + struct svm_range_bo *svm_bo; }; struct amdgpu_kfd_dev { struct kfd_dev *dev; uint64_t vram_used; + bool init_complete; }; enum kgd_engine_type { @@ -92,10 +110,6 @@ enum kgd_engine_type { KGD_ENGINE_MAX }; -struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm); -bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); -struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); struct amdkfd_process_info { /* List head of all VMs that belong to a KFD process */ @@ -122,20 +136,22 @@ struct amdkfd_process_info { int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); -void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); -int amdgpu_amdkfd_resume(struct amdgpu_device *adev); +void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev); +int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); -void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev); - -int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm); +void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid); +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, + enum TLB_FLUSH_TYPE flush_type); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); @@ -145,6 +161,42 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev); void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); +int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, + int queue_bit); + +struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, + struct mm_struct *mm, + struct svm_range_bo *svm_bo); +#if IS_ENABLED(CONFIG_HSA_AMD) +bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); +struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); +int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo); +int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm); +#else +static inline +bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) +{ + return false; +} + +static inline +struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f) +{ + return NULL; +} + +static inline +int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) +{ + return 0; +} + +static inline +int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) +{ + return 0; +} +#endif /* Shared API */ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void 
**mem_obj, uint64_t *gpu_addr, @@ -169,15 +221,20 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, uint32_t *flags); uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); +uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); +uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd); +int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); +int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min); +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when * this is called in hqd_load functions, so it should never fault in * the first place. This resolves a circular lock dependency involving - * four locks, including the DQM lock and mmap_sem. + * four locks, including the DQM lock and mmap_lock. */ #define read_user_wptr(mmptr, wptr, dst) \ ({ \ @@ -186,10 +243,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s pagefault_disable(); \ if ((mmptr) == current->mm) { \ valid = !get_user((dst), (wptr)); \ - } else if (current->mm == NULL) { \ - use_mm(mmptr); \ + } else if (current->flags & PF_KTHREAD) { \ + kthread_use_mm(mmptr); \ valid = !get_user((dst), (wptr)); \ - unuse_mm(mmptr); \ + kthread_unuse_mm(mmptr); \ } \ pagefault_enable(); \ } \ @@ -197,65 +254,153 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s }) /* GPUVM API */ -int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid, - void **vm, void **process_info, - struct dma_fence **ef); +#define drm_priv_to_vm(drm_priv) \ + (&((struct amdgpu_fpriv *) \ + ((struct drm_file *)(drm_priv))->driver_priv)->vm) + int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, - struct file *filp, unsigned int pasid, - void **vm, void **process_info, + struct file *filp, u32 pasid, + void **process_info, struct dma_fence **ef); -void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, - struct amdgpu_vm *vm); -void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm); -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm); -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv); +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, - void *vm, struct kgd_mem **mem, + void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags); int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( struct kgd_dev *kgd, struct kgd_mem 
*mem, bool intr); int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, struct kgd_mem *mem, void **kptr, uint64_t *size); +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem); + int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, struct dma_fence **ef); - int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, struct kfd_vm_fault_info *info); - int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct dma_buf *dmabuf, - uint64_t va, void *vm, + uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset); - +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config); +void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd); +#if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); +void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, + struct amdgpu_vm *vm); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); - +void amdgpu_amdkfd_reserve_system_mem(uint64_t size); +#else +static inline +void amdgpu_amdkfd_gpuvm_init_mem_limits(void) +{ +} + +static inline +void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ +} + +static inline +void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +{ +} +#endif /* KGD2KFD callbacks */ +int kgd2kfd_quiesce_mm(struct mm_struct *mm); +int kgd2kfd_resume_mm(struct mm_struct *mm); +int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, + struct dma_fence *fence); +#if IS_ENABLED(CONFIG_HSA_AMD) int kgd2kfd_init(void); void kgd2kfd_exit(void); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - unsigned int asic_type, bool vf); +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); -void kgd2kfd_suspend(struct kfd_dev *kfd); -int kgd2kfd_resume(struct kfd_dev *kfd); +void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); +int kgd2kfd_resume_iommu(struct kfd_dev *kfd); +int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); int kgd2kfd_pre_reset(struct kfd_dev *kfd); int kgd2kfd_post_reset(struct kfd_dev *kfd); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); -int kgd2kfd_quiesce_mm(struct mm_struct *mm); -int kgd2kfd_resume_mm(struct mm_struct *mm); -int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, - struct dma_fence *fence); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); - +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); +#else +static inline int kgd2kfd_init(void) +{ + return -ENOENT; +} + +static inline void kgd2kfd_exit(void) +{ +} + +static inline +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) +{ + return NULL; +} + +static inline +bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, + const struct kgd2kfd_shared_resources *gpu_resources) +{ + return false; +} + +static inline void kgd2kfd_device_exit(struct kfd_dev *kfd) +{ +} + +static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) +{ +} + +static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd) +{ + return 0; +} + +static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) +{ + return 0; +} + +static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd) +{ + return 0; +} + +static inline int 
kgd2kfd_post_reset(struct kfd_dev *kfd) +{ + return 0; +} + +static inline +void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) +{ +} + +static inline +void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) +{ +} + +static inline +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) +{ +} +#endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c new file mode 100644 index 000000000000..46cd4ee6bafb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_arcturus.h" +#include "amdgpu_amdkfd_gfx_v9.h" + +const struct kfd2kgd_calls aldebaran_kfd2kgd = { + .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, + .init_interrupts = kgd_gfx_v9_init_interrupts, + .hqd_load = kgd_gfx_v9_hqd_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, + .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_dump = kgd_gfx_v9_hqd_dump, + .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_destroy = kgd_gfx_v9_hqd_destroy, + .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .address_watch_disable = kgd_gfx_v9_address_watch_disable, + .address_watch_execute = kgd_gfx_v9_address_watch_execute, + .wave_control_execute = kgd_gfx_v9_wave_control_execute, + .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, + .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index b6713e0ed1b2..5a7f680bcb3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -22,10 +22,10 @@ #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/mmu_context.h> #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_arcturus.h" #include "sdma0/sdma0_4_2_2_offset.h" #include "sdma0/sdma0_4_2_2_sh_mask.h" #include "sdma1/sdma1_4_2_2_offset.h" @@ -46,6 +46,8 @@ #include "soc15.h" #include "soc15d.h" #include "amdgpu_amdkfd_gfx_v9.h" +#include "gfxhub_v1_0.h" +#include "mmhub_v9_4.h" #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -69,35 +71,59 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base[8] = { - SOC15_REG_OFFSET(SDMA0, 0, - mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA1, 0, - mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA2, 0, - mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA3, 0, - mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA4, 0, - mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA5, 0, - mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA6, 0, - mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA7, 0, - mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL - }; - - uint32_t retval = sdma_engine_reg_base[engine_id] + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + fallthrough; + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL; + break; + case 2: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0, + mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; + break; + case 3: + sdma_engine_reg_base = 
SOC15_REG_OFFSET(SDMA3, 0, + mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL; + break; + case 4: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0, + mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL; + break; + case 5: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0, + mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL; + break; + case 6: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0, + mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL; + break; + case 7: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0, + mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, - queue_id, retval); + queue_id, sdma_rlc_reg_offset); - return retval; + return sdma_rlc_reg_offset; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -167,7 +193,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { @@ -199,7 +225,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; @@ -218,7 +244,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, unsigned int utimeout) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -263,22 +289,22 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, + .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, .address_watch_disable = kgd_gfx_v9_address_watch_disable, .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, - .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, - .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, - .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, - .get_hive_id = amdgpu_amdkfd_get_hive_id, + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, + .set_vm_context_page_table_base = + kgd_gfx_v9_set_vm_context_page_table_base, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, + .program_trap_handler_settings = 
kgd_gfx_v9_program_trap_handler_settings
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
new file mode 100644
index 000000000000..ce08131b7b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+			     uint32_t __user *wptr, struct mm_struct *mm);
+int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+			     uint32_t engine_id, uint32_t queue_id,
+			     uint32_t (**dump)[2], uint32_t *n_regs);
+bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+			     unsigned int utimeout);
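The get_sdma_rlc_reg_offset() rework above keeps the underlying arithmetic: each engine contributes a register base and each queue advances by the RLC1-RLC0 stride. With placeholder values standing in for the SOC15 offsets, the computation is:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the real bases come from SOC15_REG_OFFSET(). */
#define SDMA0_ENGINE_BASE	0x1000u
#define RLC_QUEUE_STRIDE	0x80u	/* assumed mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL */

static uint32_t sdma_rlc_reg_offset(uint32_t engine_base, unsigned int queue_id)
{
	/* Same shape as the kernel helper: base plus queue_id times stride. */
	return engine_base + queue_id * RLC_QUEUE_STRIDE;
}

int main(void)
{
	for (unsigned int q = 0; q < 4; q++)
		printf("SDMA0 RLC%u at 0x%x\n", q,
		       sdma_rlc_reg_offset(SDMA0_ENGINE_BASE, q));
	return 0;
}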
* @@ -60,7 +61,8 @@ static atomic_t fence_seq = ATOMIC_INIT(0); */ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm) + struct mm_struct *mm, + struct svm_range_bo *svm_bo) { struct amdgpu_amdkfd_fence *fence; @@ -73,7 +75,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, fence->mm = mm; get_task_comm(fence->timeline_name, current); spin_lock_init(&fence->lock); - + fence->svm_bo = svm_bo; dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock, context, atomic_inc_return(&fence_seq)); @@ -111,6 +113,8 @@ static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f) * a KFD BO and schedules a job to move the BO. * If the fence is already signaled, return true. * If the fence is not signaled, schedule an evict KFD process work item. + * + * @f: dma_fence */ static bool amdkfd_fence_enable_signaling(struct dma_fence *f) { @@ -122,16 +126,20 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f) if (dma_fence_is_signaled(f)) return true; - if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f)) - return true; - + if (!fence->svm_bo) { + if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f)) + return true; + } else { + if (!svm_range_schedule_evict_svm_bo(fence)) + return true; + } return false; } /** * amdkfd_fence_release - callback that fence can be freed * - * @fence: fence + * @f: dma_fence * * This function is called when the reference count becomes zero. * Drops the mm_struct reference and RCU schedules freeing up the fence. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 61cd707158e4..960acf68150a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -19,12 +19,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "navi10_enum.h" #include "athub/athub_2_0_0_offset.h" #include "athub/athub_2_0_0_sh_mask.h" #include "oss/osssys_5_0_0_offset.h" @@ -33,7 +31,6 @@ #include "v10_structs.h" #include "nv.h" #include "nvd.h" -#include "gfxhub_v2_0.h" enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -42,38 +39,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; -#if 0 -/* TODO - confirm REG_GET_FIELD x2, should be OK as is...
but - * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu - * changes commented out related code, doing the same here for now but - * need to sync with Ken et al - */ - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); -#endif - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -107,13 +72,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, lock_srbm(kgd, mec, pipe, queue_id, 0); } -static uint32_t get_queue_mask(struct amdgpu_device *adev, +static uint64_t get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + - queue_id) & 31; + unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id; - return ((uint32_t)1) << bit; + return 1ull << bit; } static void release_queue(struct kgd_dev *kgd) @@ -131,14 +96,14 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, lock_srbm(kgd, 0, 0, 0, vmid); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ unlock_srbm(kgd); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, unsigned int vmid) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -196,7 +161,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) lock_srbm(kgd, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); @@ -268,34 +233,19 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); acquire_queue(kgd, pipe_id, queue_id); - /* HIQ is set during driver init period with vmid set to 0*/ - if (m->cp_hqd_vmid == 0) { - uint32_t value, mec, pipe; - - mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; - pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - - pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", - mec, pipe, queue_id); - value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); - value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, - ((mec << 5) | (pipe << 3) | queue_id | 0x80)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); - } - /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); for (reg = hqd_base; reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) - WREG32(reg, mqd_hqd[reg - hqd_base]); + WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); if (wptr) { /* Don't read wptr with get_user because the user @@ -324,32 +274,86 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, lower_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, upper_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, lower_32_bits((uint64_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, upper_32_bits((uint64_t)wptr)); - pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, get_queue_mask(adev, pipe_id, queue_id)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), - get_queue_mask(adev, pipe_id, queue_id)); + pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); + WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR, REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); release_queue(kgd); return 0; } +static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct v10_compute_mqd *m; + uint32_t mec, pipe; + int r; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + + spin_lock(&adev->gfx.kiq.ring_lock); + r = amdgpu_ring_alloc(kiq_ring, 7); + if (r) { + pr_err("Failed to alloc KIQ (%d).\n", r); + goto out_unlock; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(queue_id) | + PACKET3_MAP_QUEUES_PIPE(pipe) | + PACKET3_MAP_QUEUES_ME((mec - 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); + amdgpu_ring_commit(kiq_ring); + +out_unlock: + 
spin_unlock(&adev->gfx.kiq.ring_lock); + release_queue(kgd); + + return r; +} + static int kgd_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) @@ -361,7 +365,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ break; \ (*dump)[i][0] = (addr) << 2; \ - (*dump)[i++][1] = RREG32(addr); \ + (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ } while (0) *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); @@ -493,13 +497,13 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t low, high; acquire_queue(kgd, pipe_id, queue_id); - act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && - high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) + if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } release_queue(kgd); @@ -536,6 +540,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); + if (amdgpu_in_reset(adev)) + return -EIO; + #if 0 unsigned long flags; int retry; @@ -553,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: type = RESET_WAVES; break; + case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE: + type = SAVE_WAVES; + break; default: type = DRAIN_PIPE; break; @@ -614,11 +624,11 @@ loop: preempt_enable(); #endif - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { @@ -686,71 +696,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) -{ - signed long r; - uint32_t seq; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ - amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); - amdgpu_ring_write(ring, - PACKET3_INVALIDATE_TLBS_DST_SEL(1) | - PACKET3_INVALIDATE_TLBS_PASID(pasid)); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); - - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); - if (r < 1) { - DRM_ERROR("wait for kiq fence error: %ld.\n", r); - return -ETIME; - } - - return 0; -} - -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - uint16_t queried_pasid; - bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - if (amdgpu_emu_mode == 0 && ring->sched.ready) - return invalidate_tlbs_with_kiq(adev, pasid); - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - ret = get_atc_vmid_pasid_mapping_info(kgd, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); - break; - } - } - - return 0; -} - -static int 
invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return 0; - } - - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); - return 0; -} - static int kgd_address_watch_disable(struct kgd_dev *kgd) { return 0; @@ -774,8 +719,8 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, mutex_lock(&adev->grbm_idx_mutex); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); + WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -784,7 +729,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -809,7 +754,34 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, } /* SDMA is on gfxhub as well for Navi1* series */ - gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base); + adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); +} + +static void program_trap_handler_settings(struct kgd_dev *kgd, + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + /* + * Program TBA registers + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO), + lower_32_bits(tba_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI), + upper_32_bits(tba_addr >> 8) | + (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT)); + + /* + * Program TMA registers + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO), + lower_32_bits(tma_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), + upper_32_bits(tma_addr >> 8)); + + unlock_srbm(kgd); } const struct kfd2kgd_calls gfx_v10_kfd2kgd = { @@ -817,6 +789,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, .init_interrupts = kgd_init_interrupts, .hqd_load = kgd_hqd_load, + .hiq_mqd_load = kgd_hiq_mqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, .hqd_dump = kgd_hqd_dump, .hqd_sdma_dump = kgd_hqd_sdma_dump, @@ -830,9 +803,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, - .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .get_hive_id = amdgpu_amdkfd_get_hive_id, + .program_trap_handler_settings = program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c new file mode 100644 index 000000000000..dac0d751d5af --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -0,0 +1,862 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include <linux/mmu_context.h> +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "gc/gc_10_3_0_offset.h" +#include "gc/gc_10_3_0_sh_mask.h" +#include "oss/osssys_5_0_0_offset.h" +#include "oss/osssys_5_0_0_sh_mask.h" +#include "soc15_common.h" +#include "v10_structs.h" +#include "nv.h" +#include "nvd.h" + +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES, + SAVE_WAVES +}; + +static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) +{ + return (struct amdgpu_device *)kgd; +} + +static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, + uint32_t queue, uint32_t vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + mutex_lock(&adev->srbm_mutex); + nv_grbm_select(adev, mec, pipe, queue, vmid); +} + +static void unlock_srbm(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + nv_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, queue_id, 0); +} + +static uint64_t get_queue_mask(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id) +{ + unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id; + + return 1ull << bit; +} + +static void release_queue(struct kgd_dev *kgd) +{ + unlock_srbm(kgd); +} + +static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, + uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); + /* APE1 no longer exists on GFX9 */ + + unlock_srbm(kgd); +} + +/* ATC is defeatured on Sienna_Cichlid */ +static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; + + /* Mapping vmid to pasid also for IH block */ + pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n", + vmid, pasid); + 
WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value); + + return 0; +} + +static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t mec; + uint32_t pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, 0, 0); + + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, + CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | + CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); + + unlock_srbm(kgd); + + return 0; +} + +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + fallthrough; + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 2: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 3: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, sdma_rlc_reg_offset); + + return sdma_rlc_reg_offset; +} + +static inline struct v10_compute_mqd *get_mqd(void *mqd) +{ + return (struct v10_compute_mqd *)mqd; +} + +static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v10_sdma_mqd *)mqd; +} + +static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v10_compute_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, hqd_base, data; + + m = get_mqd(mqd); + + pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); + acquire_queue(kgd, pipe_id, queue_id); + + /* HIQ is set during driver init period with vmid set to 0*/ + if (m->cp_hqd_vmid == 0) { + uint32_t value, mec, pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); + value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, + ((mec << 5) | (pipe << 3) | queue_id | 0x80)); + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value); + } + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + + for (reg = hqd_base; + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]); + + + /* Activate doorbell logic before triggering WPTR poll. */ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + if (wptr) { + /* Don't read wptr with get_user because the user + * context may not be accessible (if this function + * runs in a work queue). 
Instead trigger a one-shot + * polling read from memory in the CP. This assumes + * that wptr is GPU-accessible in the queue's VMID via + * ATC or SVM. WPTR==RPTR before starting the poll so + * the CP starts fetching new commands from the right + * place. + * + * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit + * tricky. Assume that the queue didn't overflow. The + * number of valid bits in the 32-bit RPTR depends on + * the queue size. The remaining bits are taken from + * the saved 64-bit WPTR. If the WPTR wrapped, add the + * queue size. + */ + uint32_t queue_size = + 2 << REG_GET_FIELD(m->cp_hqd_pq_control, + CP_HQD_PQ_CONTROL, QUEUE_SIZE); + uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); + + if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) + guessed_wptr += queue_size; + guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); + guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; + + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, + lower_32_bits(guessed_wptr)); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, + upper_32_bits(guessed_wptr)); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, + lower_32_bits((uint64_t)wptr)); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, + upper_32_bits((uint64_t)wptr)); + pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); + WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); + } + + /* Start the EOP fetcher */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + REG_SET_FIELD(m->cp_hqd_eop_rptr, + CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); + + release_queue(kgd); + + return 0; +} + +static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct v10_compute_mqd *m; + uint32_t mec, pipe; + int r; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + + spin_lock(&adev->gfx.kiq.ring_lock); + r = amdgpu_ring_alloc(kiq_ring, 7); + if (r) { + pr_err("Failed to alloc KIQ (%d).\n", r); + goto out_unlock; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(queue_id) | + PACKET3_MAP_QUEUES_PIPE(pipe) | + PACKET3_MAP_QUEUES_ME((mec - 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); + amdgpu_ring_commit(kiq_ring); + +out_unlock: + spin_unlock(&adev->gfx.kiq.ring_lock); + release_queue(kgd); + + return r; +} + 
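The wptr-guessing comment above compresses a fair amount of arithmetic into four lines of hqd_load. Distilled into a standalone sketch with hypothetical plain-integer parameters in place of the MQD fields (and assuming, as the driver comment does, that the queue did not overflow):

static uint64_t guess_wptr(uint32_t rptr, uint32_t wptr_lo, uint32_t wptr_hi,
			   uint32_t queue_size /* power of two, from CP_HQD_PQ_CONTROL */)
{
	/* The valid low bits of the write pointer come from the read pointer. */
	uint64_t guessed = rptr & (queue_size - 1);

	/* If the saved low WPTR bits lag behind the guess, the queue wrapped. */
	if ((wptr_lo & (queue_size - 1)) < guessed)
		guessed += queue_size;

	/* The remaining bits are taken from the saved 64-bit write pointer. */
	guessed += wptr_lo & ~(queue_size - 1);
	guessed += (uint64_t)wptr_hi << 32;

	return guessed;
}

The guess only goes wrong if the queue wrapped more than once since the MQD was saved, which the no-overflow assumption rules out.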
+static int hqd_dump_v10_3(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t i = 0, reg; +#define HQD_N_REGS 56 +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ + } while (0) + + *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + acquire_queue(kgd, pipe_id, queue_id); + + for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + DUMP_REG(reg); + + release_queue(kgd); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v10_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); + + return 0; +} + +static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+12) + + *dump = 
kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; + reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; + reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t act; + bool retval = false; + uint32_t low, high; + + acquire_queue(kgd, pipe_id, queue_id); + act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + if (act) { + low = lower_32_bits(queue_address >> 8); + high = upper_32_bits(queue_address >> 8); + + if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) + retval = true; + } + release_queue(kgd); + return retval; +} + +static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v10_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + enum hqd_dequeue_request_type type; + unsigned long end_jiffies; + uint32_t temp; + struct v10_compute_mqd *m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + if (m->cp_hqd_vmid == 0) + WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); + + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE: + type = SAVE_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; + while (true) { + temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue pipe %d queue %d preemption failed\n", + pipe_id, queue_id); + release_queue(kgd); + return -ETIME; + } + usleep_range(500, 1000); + } + + release_queue(kgd); + return 0; +} + +static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v10_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); + temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, 
temp); + + while (true) { + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); + + return 0; +} + + +static int address_watch_disable_v10_3(struct kgd_dev *kgd) +{ + return 0; +} + +static int address_watch_execute_v10_3(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t addr_lo) +{ + return 0; +} + +static int wave_control_execute_v10_3(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SA_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SE_BROADCAST_WRITES, 1); + + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset) +{ + return 0; +} + +static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + /* SDMA is on gfxhub as well for Navi1* series */ + adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); +} + +static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd, + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + /* + * Program TBA registers + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO), + lower_32_bits(tba_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI), + upper_32_bits(tba_addr >> 8) | + (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT)); + + /* + * Program TMA registers + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO), + lower_32_bits(tma_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), + upper_32_bits(tma_addr >> 8)); + + unlock_srbm(kgd); +} + +#if 0 +uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, + uint32_t trap_debug_wave_launch_mode, + uint32_t vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + uint32_t orig_wave_cntl_value; + uint32_t orig_stall_vmid; + + mutex_lock(&adev->grbm_idx_mutex); + + orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC, + 0, + mmSPI_GDBG_WAVE_CNTL)); + orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value, + SPI_GDBG_WAVE_CNTL, + STALL_VMID); + + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + data = 0; + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid); + + 
mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + mutex_lock(&adev->grbm_idx_mutex); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, + uint32_t trap_override, + uint32_t trap_mask) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + data = 0; + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, + EXCP_EN, trap_mask); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, + REPLACE, trap_override); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, + uint8_t wave_launch_mode, + uint32_t vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + bool is_stall_mode; + bool is_mode_set; + + is_stall_mode = (wave_launch_mode == 4); + is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4); + + mutex_lock(&adev->grbm_idx_mutex); + + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + VMID_MASK, is_mode_set ? 1 << vmid : 0); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, + MODE, is_mode_set ? wave_launch_mode : 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data); + + data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, + STALL_VMID, is_stall_mode ? 1 << vmid : 0); + data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, + STALL_RA, is_stall_mode ? 1 : 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); + + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +/* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
+ */ +void get_iq_wait_times_v10_3(struct kgd_dev *kgd, + uint32_t *wait_times) + +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); +} + +void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data) +{ + *reg_data = wait_times; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = mmCP_IQ_WAIT_TIME2; +} +#endif + +const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { + .program_sh_mem_settings = program_sh_mem_settings_v10_3, + .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3, + .init_interrupts = init_interrupts_v10_3, + .hqd_load = hqd_load_v10_3, + .hiq_mqd_load = hiq_mqd_load_v10_3, + .hqd_sdma_load = hqd_sdma_load_v10_3, + .hqd_dump = hqd_dump_v10_3, + .hqd_sdma_dump = hqd_sdma_dump_v10_3, + .hqd_is_occupied = hqd_is_occupied_v10_3, + .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3, + .hqd_destroy = hqd_destroy_v10_3, + .hqd_sdma_destroy = hqd_sdma_destroy_v10_3, + .address_watch_disable = address_watch_disable_v10_3, + .address_watch_execute = address_watch_execute_v10_3, + .wave_control_execute = wave_control_execute_v10_3, + .address_watch_get_offset = address_watch_get_offset_v10_3, + .get_atc_vmid_pasid_mapping_info = NULL, + .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, + .program_trap_handler_settings = program_trap_handler_settings_v10_3, +#if 0 + .enable_debug_trap = enable_debug_trap_v10_3, + .disable_debug_trap = disable_debug_trap_v10_3, + .set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3, + .set_wave_launch_mode = set_wave_launch_mode_v10_3, + .get_iq_wait_times = get_iq_wait_times_v10_3, + .build_grace_period_packet_info = build_grace_period_packet_info_v10_3, +#endif +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 6e6f0a99ec06..b91d27e39bad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/mmu_context.h> - #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "cikd.h" @@ -84,31 +82,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. 
- */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -166,7 +139,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, unlock_srbm(kgd); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, unsigned int vmid) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -262,7 +235,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); - /* read_user_ptr may take the mm->mmap_sem. + /* read_user_ptr may take the mm->mmap_lock. * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. */ @@ -450,7 +423,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, unsigned long flags, end_jiffies; int retry; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; acquire_queue(kgd, pipe_id, queue_id); @@ -696,45 +669,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, lower_32_bits(page_table_base)); } -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - unsigned int tmp; - - if (adev->in_gpu_reset) - return -EIO; - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } - } - - return 0; -} - -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid\n"); - return 0; - } - - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - return 0; -} - /** * read_vmid_from_vmfault_reg - read vmid from register * @@ -769,9 +703,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 
bfbddedb2380..5ce0ce704a21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/mmu_context.h> - #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "gfx_v8_0.h" @@ -41,31 +39,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -123,7 +96,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, unlock_srbm(kgd); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, unsigned int vmid) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -249,7 +222,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); - /* read_user_ptr may take the mm->mmap_sem. + /* read_user_ptr may take the mm->mmap_lock. * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
*/ @@ -446,7 +419,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, int retry; struct vi_mqd *m = get_mqd(mqd); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; acquire_queue(kgd, pipe_id, queue_id); @@ -657,45 +630,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, lower_32_bits(page_table_base)); } -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid; - unsigned int tmp; - - if (adev->in_gpu_reset) - return -EIO; - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && - (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - break; - } - } - - return 0; -} - -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return -EINVAL; - } - - WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); - RREG32(mmVM_INVALIDATE_RESPONSE); - return 0; -} - const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, @@ -715,8 +649,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 47c853ef1051..bcc1cbeb8799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -19,8 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/mmu_context.h> - #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "gc/gc_9_0_offset.h" @@ -38,39 +36,16 @@ #include "v9_structs.h" #include "soc15.h" #include "soc15d.h" -#include "mmhub_v1_0.h" -#include "gfxhub_v1_0.h" -#include "gmc_v9_0.h" - +#include "gfx_v9_0.h" +#include "amdgpu_amdkfd_gfx_v9.h" enum hqd_dequeue_request_type { NO_ACTION = 0, DRAIN_PIPE, - RESET_WAVES + RESET_WAVES, + SAVE_WAVES }; - -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. 
- */ -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -104,13 +79,13 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, lock_srbm(kgd, mec, pipe, queue_id, 0); } -static uint32_t get_queue_mask(struct amdgpu_device *adev, +static uint64_t get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + - queue_id) & 31; + unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id; - return ((uint32_t)1) << bit; + return 1ull << bit; } static void release_queue(struct kgd_dev *kgd) @@ -135,7 +110,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, unlock_srbm(kgd); } -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, unsigned int vmid) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -220,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base[2] = { - SOC15_REG_OFFSET(SDMA0, 0, - mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA1, 0, - mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL - }; - uint32_t retval = sdma_engine_reg_base[engine_id] + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + fallthrough; + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, - queue_id, retval); + queue_id, sdma_rlc_reg_offset); - return retval; + return sdma_rlc_reg_offset; } static inline struct v9_mqd *get_mqd(void *mqd) @@ -259,21 +247,6 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, acquire_queue(kgd, pipe_id, queue_id); - /* HIQ is set during driver init period with vmid set to 0*/ - if (m->cp_hqd_vmid == 0) { - uint32_t value, mec, pipe; - - mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; - pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - - pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", - mec, pipe, queue_id); - value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); - value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, - ((mec << 5) | (pipe << 3) | queue_id | 0x80)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); - } - /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. 
*/ mqd_hqd = &m->cp_mqd_base_addr_lo; hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); @@ -324,7 +297,7 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), - get_queue_mask(adev, pipe_id, queue_id)); + (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ @@ -340,6 +313,59 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, return 0; } +int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + struct v9_mqd *m; + uint32_t mec, pipe; + int r; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + + spin_lock(&adev->gfx.kiq.ring_lock); + r = amdgpu_ring_alloc(kiq_ring, 7); + if (r) { + pr_err("Failed to alloc KIQ (%d).\n", r); + goto out_unlock; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(queue_id) | + PACKET3_MAP_QUEUES_PIPE(pipe) | + PACKET3_MAP_QUEUES_ME((mec - 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); + amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); + amdgpu_ring_commit(kiq_ring); + +out_unlock: + spin_unlock(&adev->gfx.kiq.ring_lock); + release_queue(kgd); + + return r; +} + int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) @@ -526,7 +552,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v9_mqd *m = get_mqd(mqd); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; acquire_queue(kgd, pipe_id, queue_id); @@ -541,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: type = RESET_WAVES; break; + case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE: + type = SAVE_WAVES; + break; default: type = DRAIN_PIPE; break; @@ -618,100 +647,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, - uint32_t flush_type) -{ - signed long r; - uint32_t seq; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - - spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ - amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); - amdgpu_ring_write(ring, - PACKET3_INVALIDATE_TLBS_DST_SEL(1) | - PACKET3_INVALIDATE_TLBS_ALL_HUB(1) | - 
PACKET3_INVALIDATE_TLBS_PASID(pasid) | - PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); - - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); - if (r < 1) { - DRM_ERROR("wait for kiq fence error: %ld.\n", r); - return -ETIME; - } - - return 0; -} - -int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int vmid, i; - uint16_t queried_pasid; - bool ret; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - uint32_t flush_type = 0; - - if (adev->in_gpu_reset) - return -EIO; - if (adev->gmc.xgmi.num_physical_nodes && - adev->asic_type == CHIP_VEGA20) - flush_type = 2; - - if (ring->sched.ready) - return invalidate_tlbs_with_kiq(adev, pasid, flush_type); - - for (vmid = 0; vmid < 16; vmid++) { - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) - continue; - - ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid, - &queried_pasid); - if (ret && queried_pasid == pasid) { - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - i, flush_type); - break; - } - } - - return 0; -} - -int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - int i; - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("non kfd vmid %d\n", vmid); - return 0; - } - - /* Use legacy mode tlb invalidation. - * - * Currently on Raven the code below is broken for anything but - * legacy mode due to a MMHUB power gating problem. A workaround - * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ - * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack - * bit. - * - * TODO 1: agree on the right set of invalidation registers for - * KFD use. Use the last one for now. Invalidate both GC and - * MMHUB. - * - * TODO 2: support range-based invalidation, requires kfg2kgd - * interface change - */ - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); - - return 0; -} - int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) { return 0; @@ -758,8 +693,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, + uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -769,18 +704,208 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi return; } - /* TODO: take advantage of per-process address space size. For - * now, all processes share the same address space size, like - * on GFX8 and older. + adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); + + adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); +} + +static void lock_spi_csq_mutexes(struct amdgpu_device *adev) +{ + mutex_lock(&adev->srbm_mutex); + mutex_lock(&adev->grbm_idx_mutex); + +} + +static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) +{ + mutex_unlock(&adev->grbm_idx_mutex); + mutex_unlock(&adev->srbm_mutex); +} + +/** + * get_wave_count: Read device registers to get number of waves in flight for + * a particular queue. The method also returns the VMID associated with the + * queue. 
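+ * + * For illustration, assuming eight queues per pipe as on GFX9: queue-map + * index 13 decomposes into pipe_idx = 13 / 8 = 1 and queue_slot = 13 % 8 = 5, + * so the wave count is read from (mmSPI_CSQ_WF_ACTIVE_COUNT_0 + 5) after + * selecting pipe 1 in GRBM.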
+ * + * @adev: Handle of device whose registers are to be read + * @queue_idx: Index of queue in the queue-map bit-field + * @wave_cnt: Output parameter updated with number of waves in flight + * @vmid: Output parameter updated with VMID of queue whose wave count + * is being collected + */ +static void get_wave_count(struct amdgpu_device *adev, int queue_idx, + int *wave_cnt, int *vmid) +{ + int pipe_idx; + int queue_slot; + unsigned int reg_val; + + /* + * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID + * parameters to read out waves in flight. Get VMID if there are + * non-zero waves in flight. + */ + *vmid = 0xFF; + *wave_cnt = 0; + pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; + queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0); + reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + + queue_slot); + *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; + if (*wave_cnt != 0) + *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) & + CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT; +} + +/** + * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each + * shader engine and aggregates the number of waves that are in flight for the + * process whose pasid is provided as a parameter. The process could have ZERO + * or more queues running and submitting waves to compute units. + * + * @kgd: Handle of device from which to get number of waves in flight + * @pasid: Identifies the process for which this query call is invoked + * @pasid_wave_cnt: Output parameter updated with number of waves in flight that + * belong to process with given pasid + * @max_waves_per_cu: Output parameter updated with maximum number of waves + * possible per Compute Unit + * + * Note: It's possible that the device has too many queues (oversubscription) + * in which case a VMID could be remapped to a different PASID. This could lead + * to an inaccurate wave count. Following is a high-level sequence: + * Time T1: vmid = getVmid(); vmid is associated with Pasid P1 + * Time T2: pasid = getPasid(vmid); vmid is now associated with Pasid P2 + * In the sequence above the wave count obtained at time T1 is attributed + * incorrectly: it is either lost or added to the wrong process's total. + * + * The registers that provide the waves in flight are: + * + * SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a + * queue is slotted, OFF if there is no queue. A process could have ZERO or + * more queues slotted and submitting waves to be run on compute units. Even + * when there is a queue it is possible there are zero wave fronts; this can + * happen when the queue is waiting on top-of-pipe events, e.g. a waitRegMem + * command + * + * For each bit that is ON from above: + * + * Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the + * number of waves that are in flight for the queue at specified index. The + * index ranges from 0 to 7. + * + * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID + * of the wave(s). + * + * Determine if VMID from above step maps to pasid provided as parameter. If + * it matches, aggregate the wave count. A VMID that does not match the pasid + * is a normal condition, i.e. a device is expected to support multiple queues + * from multiple processes.
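+ * + * In pseudo-code, the aggregation below amounts to (illustrative only, + * names as in the function body): + * + * for_each_set_bit(qidx, queue_map & cp_queue_bitmap) { + * get_wave_count(adev, qidx, &wave_cnt, &vmid); + * if (RREG32(mmIH_VMID_0_LUT + vmid) == pasid) + * vmid_wave_cnt += wave_cnt; + * }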
+ * + * Reading registers referenced above involves programming GRBM appropriately. + */ +void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, + int *pasid_wave_cnt, int *max_waves_per_cu) +{ + int qidx; + int vmid; + int se_idx; + int sh_idx; + int se_cnt; + int sh_cnt; + int wave_cnt; + int queue_map; + int pasid_tmp; + int max_queue_cnt; + int vmid_wave_cnt = 0; + struct amdgpu_device *adev; + DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); + + adev = get_amdgpu_device(kgd); + lock_spi_csq_mutexes(adev); + soc15_grbm_select(adev, 1, 0, 0, 0); + + /* + * Iterate through the shader engines and arrays of the device + * to get the number of waves in flight + */ + bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap, + KGD_MAX_QUEUES); + max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe; + sh_cnt = adev->gfx.config.max_sh_per_se; + se_cnt = adev->gfx.config.max_shader_engines; + for (se_idx = 0; se_idx < se_cnt; se_idx++) { + for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { + + gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff); + queue_map = RREG32(SOC15_REG_OFFSET(GC, 0, + mmSPI_CSQ_WF_ACTIVE_STATUS)); + + /* + * Assumption: the queue map encodes the following + * schema: four pipes per micro-engine, with each pipe + * mapping eight queues. This schema is true for GFX9 + * devices and must be verified for newer device + * families + */ + for (qidx = 0; qidx < max_queue_cnt; qidx++) { + + /* Skip queues that are not associated with + * compute functions + */ + if (!test_bit(qidx, cp_queue_bitmap)) + continue; + + if (!(queue_map & (1 << qidx))) + continue; + + /* Get number of waves in flight and aggregate them */ + get_wave_count(adev, qidx, &wave_cnt, &vmid); + if (wave_cnt != 0) { + pasid_tmp = + RREG32(SOC15_REG_OFFSET(OSSSYS, 0, + mmIH_VMID_0_LUT) + vmid); + if (pasid_tmp == pasid) + vmid_wave_cnt += wave_cnt; + } + } + } + } + + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + soc15_grbm_select(adev, 0, 0, 0, 0); + unlock_spi_csq_mutexes(adev); + + /* Update the output parameters and return */ + *pasid_wave_cnt = vmid_wave_cnt; + *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu * + adev->gfx.cu_info.max_waves_per_simd; +} + +void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + /* + * Program TBA registers + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO), + lower_32_bits(tba_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI), + upper_32_bits(tba_addr >> 8)); + + /* + * Program TMA registers */ - if (adev->asic_type == CHIP_ARCTURUS) { - /* Two MMHUBs */ - mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base); - mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base); - } else - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO), + lower_32_bits(tma_addr >> 8)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), + upper_32_bits(tma_addr >> 8)); - gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); + unlock_srbm(kgd); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { @@ -788,6 +913,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_hqd_load, + .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, .hqd_dump =
kgd_gfx_v9_hqd_dump, .hqd_sdma_dump = kgd_hqd_sdma_dump, @@ -801,9 +927,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, - .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, - .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, - .get_hive_id = amdgpu_amdkfd_get_hive_id, + .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, + .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index d9e9ad22b2bd..c63591106879 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -26,13 +26,16 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, unsigned int vmid); int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); +int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t doorbell_off); int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); @@ -57,9 +60,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, uint8_t vmid, uint16_t *p_pasid); -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config); + +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, + uint32_t vmid, uint64_t page_table_base); +void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, + int *pasid_wave_cnt, int *max_waves_per_cu); +void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 888209eb8cec..0e9cfe99ae9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -26,12 +26,12 @@ #include <linux/sched/task.h> #include "amdgpu_object.h" +#include "amdgpu_gem.h" #include "amdgpu_vm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" - -/* BO flag to indicate a KFD userptr BO */ -#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) +#include <uapi/linux/kfd_ioctl.h> +#include "amdgpu_xgmi.h" /* Userptr restore delay, just long enough to allow consecutive VM * changes to accumulate @@ -47,12 +47,6 @@ static struct { spinlock_t mem_limit_lock; } kfd_mem_limit; -/* Struct used for amdgpu_amdkfd_bo_validate */ -struct amdgpu_vm_parser { - uint32_t 
domain; - bool wait; -}; - static const char * const domain_bit_to_string[] = { "CPU", "GTT", @@ -72,20 +66,20 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) return (struct amdgpu_device *)kgd; } -static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, +static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) + list_for_each_entry(entry, &mem->attachments, list) if (entry->bo_va->base.vm == avm) - return false; + return true; - return true; + return false; } /* Set memory usage limits. Currently, limits are - * System (TTM + userptr) memory - 3/4th System RAM + * System (TTM + userptr) memory - 15/16th System RAM * TTM memory - 3/8th System RAM */ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) @@ -94,17 +88,22 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) uint64_t mem; si_meminfo(&si); - mem = si.totalram - si.totalhigh; + mem = si.freeram - si.freehigh; mem *= si.mem_unit; spin_lock_init(&kfd_mem_limit.mem_limit_lock); - kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); + kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", (kfd_mem_limit.max_system_mem_limit >> 20), (kfd_mem_limit.max_ttm_mem_limit >> 20)); } +void amdgpu_amdkfd_reserve_system_mem(uint64_t size) +{ + kfd_mem_limit.system_mem_used += size; +} + /* Estimate page table size needed to represent a given memory size * * With 4KB pages, we need one 8 byte PTE for each 4KB of memory @@ -117,6 +116,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) */ #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14) +static size_t amdgpu_amdkfd_acc_size(uint64_t size) +{ + size >>= PAGE_SHIFT; + size *= sizeof(dma_addr_t) + sizeof(void *); + + return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) + + __roundup_pow_of_two(sizeof(struct ttm_tt)) + + PAGE_ALIGN(size); +} + static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 domain, bool sg) { @@ -125,8 +134,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; int ret = 0; - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); + acc_size = amdgpu_amdkfd_acc_size(size); vram_needed = 0; if (domain == AMDGPU_GEM_DOMAIN_GTT) { @@ -147,8 +155,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, spin_lock(&kfd_mem_limit.mem_limit_lock); + if (kfd_mem_limit.system_mem_used + system_mem_needed > + kfd_mem_limit.max_system_mem_limit) + pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); + if ((kfd_mem_limit.system_mem_used + system_mem_needed > - kfd_mem_limit.max_system_mem_limit) || + kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev->kfd.vram_used + vram_needed > @@ -169,8 +181,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev, { size_t acc_size; - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); + acc_size = amdgpu_amdkfd_acc_size(size); spin_lock(&kfd_mem_limit.mem_limit_lock); if (domain == AMDGPU_GEM_DOMAIN_GTT) { @@ -202,7 +213,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) u32 domain = bo->preferred_domains; bool sg =
(bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); - if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { + if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { domain = AMDGPU_GEM_DOMAIN_CPU; sg = false; } @@ -230,12 +241,11 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, if (!ef) return -EINVAL; - old = dma_resv_get_list(resv); + old = dma_resv_shared_list(resv); if (!old) return 0; - new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]), - GFP_KERNEL); + new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL); if (!new) return -ENOMEM; @@ -257,14 +267,12 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_count = k; /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); write_seqcount_begin(&resv->seq); RCU_INIT_POINTER(resv->fence, new); write_seqcount_end(&resv->seq); - preempt_enable(); /* Drop the references to the removed fences or move them to ef_list */ - for (i = j, k = 0; i < old->shared_count; ++i) { + for (i = j; i < old->shared_count; ++i) { struct dma_fence *f; f = rcu_dereference_protected(new->shared[i], @@ -276,6 +284,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, return 0; } +int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) +{ + struct amdgpu_bo *root = bo; + struct amdgpu_vm_bo_base *vm_bo; + struct amdgpu_vm *vm; + struct amdkfd_process_info *info; + struct amdgpu_amdkfd_fence *ef; + int ret; + + /* we can always get vm_bo from root PD bo.*/ + while (root->parent) + root = root->parent; + + vm_bo = root->vm_bo; + if (!vm_bo) + return 0; + + vm = vm_bo->vm; + if (!vm) + return 0; + + info = vm->process_info; + if (!info || !info->eviction_fence) + return 0; + + ef = container_of(dma_fence_get(&info->eviction_fence->base), + struct amdgpu_amdkfd_fence, base); + + BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); + ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); + dma_resv_unlock(bo->tbo.base.resv); + + dma_fence_put(&ef->base); + return ret; +} + static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, bool wait) { @@ -298,11 +342,9 @@ validate_fail: return ret; } -static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) +static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) { - struct amdgpu_vm_parser *p = param; - - return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait); + return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); } /* vm_validate_pt_pd_bos - Validate page table and directory BOs @@ -314,33 +356,28 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) */ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); - struct amdgpu_vm_parser param; int ret; - param.domain = AMDGPU_GEM_DOMAIN_VRAM; - param.wait = false; - - ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, - ¶m); + ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); if (ret) { - pr_err("amdgpu: failed to validate PT BOs\n"); + pr_err("failed to validate PT BOs\n"); return ret; } - ret = amdgpu_amdkfd_validate(¶m, pd); + ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd); if (ret) { - pr_err("amdgpu: failed to validate PD\n"); + pr_err("failed to validate PD\n"); return ret; } - vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); if 
(vm->use_cpu_for_update) { ret = amdgpu_bo_kmap(pd, NULL); if (ret) { - pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret); + pr_err("failed to kmap PD, ret=%d\n", ret); return ret; } } @@ -350,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; @@ -358,30 +395,58 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) if (ret) return ret; - return amdgpu_sync_fence(NULL, sync, vm->last_update, false); + return amdgpu_sync_fence(sync, vm->last_update); } static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); - bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; + bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED; uint32_t mapping_flags; + uint64_t pte_flags; + bool snoop = false; mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; switch (adev->asic_type) { case CHIP_ARCTURUS: - if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { if (bo_adev == adev) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; else - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } else { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + break; + case CHIP_ALDEBARAN: + if (coherent && uncached) { + if (adev->gmc.xgmi.connected_to_cpu || + !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) + snoop = true; + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + if (bo_adev == adev) { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + if (adev->gmc.xgmi.connected_to_cpu) + snoop = true; + } else { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + if (amdgpu_xgmi_same_hive(adev, bo_adev)) + snoop = true; + } } else { + snoop = true; mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } @@ -391,90 +456,327 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } - return amdgpu_gem_va_map_flags(adev, mapping_flags); + pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags); + pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; + + return pte_flags; +} + +static int +kfd_mem_dmamap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *src_ttm = mem->bo->tbo.ttm; + struct ttm_tt *ttm = bo->tbo.ttm; + int ret; + + ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); + if (unlikely(!ttm->sg)) + return -ENOMEM; + + if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) + return -EINVAL; + + /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ + ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, + ttm->num_pages, 0, + (u64)ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (unlikely(ret)) + goto free_sg; + + ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); + if (unlikely(ret)) + goto release_sg; + + drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, + ttm->num_pages); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unmap_sg; + + return 0; + +unmap_sg: + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); +release_sg: + pr_err("DMA map userptr failed: %d\n", ret); + sg_free_table(ttm->sg); +free_sg: + kfree(ttm->sg); + ttm->sg = NULL; + return ret; +} + +static int +kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +static int +kfd_mem_dmamap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + return 0; + case KFD_MEM_ATT_USERPTR: + return kfd_mem_dmamap_userptr(mem, attachment); + case KFD_MEM_ATT_DMABUF: + return kfd_mem_dmamap_dmabuf(attachment); + default: + WARN_ON_ONCE(1); + } + return -EINVAL; +} + +static void +kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = false}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *ttm = bo->tbo.ttm; + + if (unlikely(!ttm->sg)) + return; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); + sg_free_table(ttm->sg); + kfree(ttm->sg); + ttm->sg = NULL; +} + +static void +kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +static void +kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + break; + case KFD_MEM_ATT_USERPTR: + kfd_mem_dmaunmap_userptr(mem, attachment); + break; + case KFD_MEM_ATT_DMABUF: + kfd_mem_dmaunmap_dmabuf(attachment); + break; + default: + WARN_ON_ONCE(1); + } +} + +static int +kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + unsigned long bo_size = mem->bo->tbo.base.size; + struct drm_gem_object *gobj; + int ret; + + ret = amdgpu_bo_reserve(mem->bo, false); + if (ret) + return ret; + + ret = amdgpu_gem_object_create(adev, bo_size, 1, + AMDGPU_GEM_DOMAIN_CPU, + AMDGPU_GEM_CREATE_PREEMPTIBLE, + ttm_bo_type_sg, mem->bo->tbo.base.resv, + &gobj); + amdgpu_bo_unreserve(mem->bo); + if (ret) + return ret; + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; +} + +static int +kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + struct drm_gem_object *gobj; + int ret; + + if (!mem->dmabuf) { + mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DRM_RDWR : 0); + if (IS_ERR(mem->dmabuf)) { + ret = PTR_ERR(mem->dmabuf); + mem->dmabuf = NULL; + return ret; + } + } + + gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); + if (IS_ERR(gobj)) + return PTR_ERR(gobj); + + /* Import takes an extra reference on the dmabuf. Drop it now to + * avoid leaking it. We only need the one reference in + * kgd_mem->dmabuf. + */ + dma_buf_put(mem->dmabuf); + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; } -/* add_bo_to_vm - Add a BO to a VM +/* kfd_mem_attach - Add a BO to a VM * * Everything that needs to be done only once when a BO is first added * to a VM. It can later be mapped and unmapped many times without * repeating these steps. * + * 0. Create BO for DMA mapping, if needed * 1. Allocate and initialize BO VA entry data structure * 2. Add BO to the VM * 3. Determine ASIC-specific PTE flags * 4. Alloc page tables and directories if needed * 4a.
Validate new page tables and directories */ -static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, - struct amdgpu_vm *vm, bool is_aql, - struct kfd_bo_va_list **p_bo_va_entry) +static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_vm *vm, bool is_aql) { - int ret; - struct kfd_bo_va_list *bo_va_entry; - struct amdgpu_bo *bo = mem->bo; + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + unsigned long bo_size = mem->bo->tbo.base.size; uint64_t va = mem->va; - struct list_head *list_bo_va = &mem->bo_va_list; - unsigned long bo_size = bo->tbo.mem.size; + struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; + struct amdgpu_bo *bo[2] = {NULL, NULL}; + int i, ret; if (!va) { pr_err("Invalid VA when adding BO to VM\n"); return -EINVAL; } - if (is_aql) - va += bo_size; - - bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL); - if (!bo_va_entry) - return -ENOMEM; + for (i = 0; i <= is_aql; i++) { + attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); + if (unlikely(!attachment[i])) { + ret = -ENOMEM; + goto unwind; + } - pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, - va + bo_size, vm); + pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, + va + bo_size, vm); - /* Add BO to VM internal data structures*/ - bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!bo_va_entry->bo_va) { - ret = -EINVAL; - pr_err("Failed to add BO object to VM. ret == %d\n", - ret); - goto err_vmadd; - } + if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && + amdgpu_xgmi_same_hive(adev, bo_adev))) { + /* Mappings on the local GPU and VRAM mappings in the + * local hive share the original BO + */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (i > 0) { + /* Multiple mappings on the same GPU share the BO */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = bo[0]; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { + /* Create an SG BO to DMA-map userptrs on other GPUs */ + attachment[i]->type = KFD_MEM_ATT_USERPTR; + ret = kfd_mem_attach_userptr(adev, mem, &bo[i]); + if (ret) + goto unwind; + } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT && + mem->bo->tbo.type != ttm_bo_type_sg) { + /* GTT BOs use DMA-mapping ability of dynamic-attach + * DMA bufs. TODO: The same should work for VRAM on + * large-BAR GPUs. + */ + attachment[i]->type = KFD_MEM_ATT_DMABUF; + ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); + if (ret) + goto unwind; + } else { + /* FIXME: Need to DMA-map other BO types: + * large-BAR VRAM, doorbells, MMIO remap + */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } - bo_va_entry->va = va; - bo_va_entry->pte_flags = get_pte_flags(adev, mem); - bo_va_entry->kgd_dev = (void *)adev; - list_add(&bo_va_entry->bo_list, list_bo_va); + /* Add BO to VM internal data structures */ + attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + if (unlikely(!attachment[i]->bo_va)) { + ret = -ENOMEM; + pr_err("Failed to add BO object to VM. 
ret == %d\n", + ret); + goto unwind; + } - if (p_bo_va_entry) - *p_bo_va_entry = bo_va_entry; + attachment[i]->va = va; + attachment[i]->pte_flags = get_pte_flags(adev, mem); + attachment[i]->adev = adev; + list_add(&attachment[i]->list, &mem->attachments); - /* Allocate validate page tables if needed */ - ret = vm_validate_pt_pd_bos(vm); - if (ret) { - pr_err("validate_pt_pd_bos() failed\n"); - goto err_alloc_pts; + va += bo_size; } return 0; -err_alloc_pts: - amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); - list_del(&bo_va_entry->bo_list); -err_vmadd: - kfree(bo_va_entry); +unwind: + for (; i >= 0; i--) { + if (!attachment[i]) + continue; + if (attachment[i]->bo_va) { + amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + list_del(&attachment[i]->list); + } + if (bo[i]) + drm_gem_object_put(&bo[i]->tbo.base); + kfree(attachment[i]); + } return ret; } -static void remove_bo_from_vm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, unsigned long size) +static void kfd_mem_detach(struct kfd_mem_attachment *attachment) { - pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n", - entry->va, - entry->va + size, entry); - amdgpu_vm_bo_rmv(adev, entry->bo_va); - list_del(&entry->bo_list); - kfree(entry); + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + pr_debug("\t remove VA 0x%llx in entry %p\n", + attachment->va, attachment); + amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va); + drm_gem_object_put(&bo->tbo.base); + list_del(&attachment->list); + kfree(attachment); } static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, @@ -527,7 +829,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr) mutex_lock(&process_info->lock); - ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0); + ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0); if (ret) { pr_err("%s: Failed to set userptr: %d\n", __func__, ret); goto out; @@ -623,15 +925,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, false, &ctx->duplicates); - if (!ret) - ctx->reserved = true; - else { - pr_err("Failed to reserve buffers in ttm\n"); + if (ret) { + pr_err("Failed to reserve buffers in ttm.\n"); kfree(ctx->vm_pd); ctx->vm_pd = NULL; + return ret; } - return ret; + ctx->reserved = true; + return 0; } /** @@ -649,7 +951,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, struct bo_vm_reservation_context *ctx) { struct amdgpu_bo *bo = mem->bo; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; unsigned int i; int ret; @@ -661,7 +963,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, INIT_LIST_HEAD(&ctx->list); INIT_LIST_HEAD(&ctx->duplicates); - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -683,7 +985,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, list_add(&ctx->kfd_bo.tv.head, &ctx->list); i = 0; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -696,17 +998,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, false, &ctx->duplicates); - if (!ret) - ctx->reserved = true; - else - pr_err("Failed to reserve buffers in ttm.\n"); - if (ret) { + pr_err("Failed to reserve buffers in ttm.\n"); 
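+ /* Reservation failed: free the vm_pd array allocated above and + * bail out; ctx->reserved stays false. + */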
kfree(ctx->vm_pd); ctx->vm_pd = NULL; + return ret; } - return ret; + ctx->reserved = true; + return 0; } /** @@ -739,47 +1039,56 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, return ret; } -static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, +static void unmap_bo_from_gpuvm(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; struct amdgpu_vm *vm = bo_va->base.vm; amdgpu_vm_bo_unmap(adev, bo_va, entry->va); amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); - amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + amdgpu_sync_fence(sync, bo_va->last_pt_update); - return 0; + kfd_mem_dmaunmap_attachment(mem, entry); } -static int update_gpuvm_pte(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, - struct amdgpu_sync *sync) +static int update_gpuvm_pte(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync, + bool *table_freed) { - int ret; struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; + int ret; + + ret = kfd_mem_dmamap_attachment(mem, entry); + if (ret) + return ret; /* Update the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false); + ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); if (ret) { pr_err("amdgpu_vm_bo_update failed\n"); return ret; } - return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + return amdgpu_sync_fence(sync, bo_va->last_pt_update); } -static int map_bo_to_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, - bool no_update_pte) +static int map_bo_to_gpuvm(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync, + bool no_update_pte, + bool *table_freed) { int ret; /* Set virtual address for the allocation */ - ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0, + ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, amdgpu_bo_size(entry->bo_va->base.bo), entry->pte_flags); if (ret) { @@ -791,7 +1100,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, if (no_update_pte) return 0; - ret = update_gpuvm_pte(adev, entry, sync); + ret = update_gpuvm_pte(mem, entry, sync, table_freed); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -800,7 +1109,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, return 0; update_gpuvm_pte_failed: - unmap_bo_from_gpuvm(adev, entry, sync); + unmap_bo_from_gpuvm(mem, entry, sync); return ret; } @@ -845,11 +1154,11 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info, list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *pd = peer_vm->root.base.bo; + struct amdgpu_bo *pd = peer_vm->root.bo; - ret = amdgpu_sync_resv(NULL, - sync, pd->tbo.base.resv, - AMDGPU_FENCE_OWNER_KFD, false); + ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, + AMDGPU_SYNC_NE_OWNER, + AMDGPU_FENCE_OWNER_KFD); if (ret) return ret; } @@ -892,7 +1201,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, info->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), - current->mm); + current->mm, + NULL); if (!info->eviction_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; @@ -911,7 +1221,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, vm->process_info = *process_info; /* Validate page 
directory and attach eviction fence */ - ret = amdgpu_bo_reserve(vm->root.base.bo, true); + ret = amdgpu_bo_reserve(vm->root.bo, true); if (ret) goto reserve_pd_fail; ret = vm_validate_pt_pd_bos(vm); @@ -919,16 +1229,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, pr_err("validate_pt_pd_bos() failed\n"); goto validate_pd_fail; } - ret = amdgpu_bo_sync_wait(vm->root.base.bo, + ret = amdgpu_bo_sync_wait(vm->root.bo, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; - amdgpu_bo_fence(vm->root.base.bo, + amdgpu_bo_fence(vm->root.bo, &vm->process_info->eviction_fence->base, true); - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); /* Update process info */ mutex_lock(&vm->process_info->lock); @@ -942,7 +1252,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, reserve_shared_fail: wait_pd_fail: validate_pd_fail: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); reserve_pd_fail: vm->process_info = NULL; if (info) { @@ -959,67 +1269,47 @@ create_evict_fence_fail: return ret; } -int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid, - void **vm, void **process_info, - struct dma_fence **ef) -{ - struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *new_vm; - int ret; - - new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL); - if (!new_vm) - return -ENOMEM; - - /* Initialize AMDGPU part of the VM */ - ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid); - if (ret) { - pr_err("Failed init vm ret %d\n", ret); - goto amdgpu_vm_init_fail; - } - - /* Initialize KFD part of the VM and process info */ - ret = init_kfd_vm(new_vm, process_info, ef); - if (ret) - goto init_kfd_vm_fail; - - *vm = (void *) new_vm; - - return 0; - -init_kfd_vm_fail: - amdgpu_vm_fini(adev, new_vm); -amdgpu_vm_init_fail: - kfree(new_vm); - return ret; -} - int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, - struct file *filp, unsigned int pasid, - void **vm, void **process_info, + struct file *filp, u32 pasid, + void **process_info, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct drm_file *drm_priv = filp->private_data; - struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; - struct amdgpu_vm *avm = &drv_priv->vm; + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; int ret; + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + /* Already a compute VM? */ if (avm->process_info) return -EINVAL; + /* Free the original amdgpu allocated pasid, + * it will be replaced with a kfd allocated pasid.
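+ * The kfd pasid is installed just below, once the VM has been + * converted to a compute VM.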
+ */ + if (avm->pasid) { + amdgpu_pasid_free(avm->pasid); + amdgpu_vm_set_pasid(adev, avm, 0); + } + /* Convert VM into a compute VM */ - ret = amdgpu_vm_make_compute(adev, avm, pasid); + ret = amdgpu_vm_make_compute(adev, avm); if (ret) return ret; + ret = amdgpu_vm_set_pasid(adev, avm, pasid); + if (ret) + return ret; /* Initialize KFD part of the VM and process info */ ret = init_kfd_vm(avm, process_info, ef); if (ret) return ret; - *vm = (void *)avm; + amdgpu_vm_set_task_info(avm); return 0; } @@ -1028,7 +1318,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdkfd_process_info *process_info = vm->process_info; - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; if (!process_info) return; @@ -1044,6 +1334,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, list_del(&vm->vm_list_node); mutex_unlock(&process_info->lock); + vm->process_info = NULL; + /* Release per-process resources when last compute VM is destroyed */ if (!process_info->n_vms) { WARN_ON(!list_empty(&process_info->kfd_bo_list)); @@ -1058,44 +1350,31 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !vm)) + if (WARN_ON(!kgd || !drm_priv)) return; - pr_debug("Destroying process vm %p\n", vm); - - /* Release the VM context */ - amdgpu_vm_fini(adev, avm); - kfree(vm); -} - -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) -{ - struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; - - if (WARN_ON(!kgd || !vm)) - return; + avm = drm_priv_to_vm(drm_priv); - pr_debug("Releasing process vm %p\n", vm); + pr_debug("Releasing process vm %p\n", avm); - /* The original pasid of amdgpu vm has already been - * released during making a amdgpu vm to a compute vm - * The current pasid is managed by kfd and will be - * released on kfd process destroy. Set amdgpu pasid - * to 0 to avoid duplicate release. - */ + /* The original pasid of amdgpu vm has already been + * released when the amdgpu vm was converted to a compute vm. + * The current pasid is managed by kfd and will be + * released on kfd process destroy. Set amdgpu pasid + * to 0 to avoid duplicate release.
+ */ amdgpu_vm_release_compute(adev, avm); } -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) { - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; - struct amdgpu_bo *pd = avm->root.base.bo; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdgpu_bo *pd = avm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); if (adev->asic_type < CHIP_VEGA10) @@ -1105,16 +1384,16 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, - void *vm, struct kgd_mem **mem, + void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct amdgpu_bo_param bp; + struct drm_gem_object *gobj; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1122,24 +1401,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( /* * Check on which domain to allocate BO */ - if (flags & ALLOC_MEM_FLAGS_VRAM) { + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - } else if (flags & ALLOC_MEM_FLAGS_GTT) { + alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; - } else if (flags & ALLOC_MEM_FLAGS_USERPTR) { + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; - alloc_flags = 0; + alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE; if (!offset || !*offset) return -EINVAL; user_addr = untagged_addr(*offset); - } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL | - ALLOC_MEM_FLAGS_MMIO_REMAP)) { + } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; bo_type = ttm_bo_type_sg; @@ -1158,9 +1436,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = -ENOMEM; goto err; } - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); - (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); + (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); /* Workaround for AQL queue wraparound bug. Map the same * memory twice. 
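* The second copy of the mapping is placed immediately after the first, * so fetches that run past the end of the queue buffer land in valid, * identical memory.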
That means we only actually allocate half @@ -1175,26 +1453,26 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); if (ret) { - pr_debug("Insufficient system memory\n"); + pr_debug("Insufficient memory\n"); goto err_reserve_limit; } pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", va, size, domain_string(alloc_domain)); - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = 1; - bp.domain = alloc_domain; - bp.flags = alloc_flags; - bp.type = bo_type; - bp.resv = NULL; - ret = amdgpu_bo_create(adev, &bp, &bo); + ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, + bo_type, NULL, &gobj); if (ret) { pr_debug("Failed to create BO on domain %s. ret %d\n", - domain_string(alloc_domain), ret); + domain_string(alloc_domain), ret); goto err_bo_create; } + ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); + if (ret) { + pr_debug("Failed to allow vma node access. ret %d\n", ret); + goto err_node_allow; + } + bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; bo->tbo.ttm->sg = sg; @@ -1202,7 +1480,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( bo->kfd_bo = *mem; (*mem)->bo = bo; if (user_addr) - bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; + bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; (*mem)->va = va; (*mem)->domain = domain; @@ -1223,7 +1501,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); - amdgpu_bo_unref(&bo); + drm_vma_node_revoke(&gobj->vma_node, drm_priv); +err_node_allow: + drm_gem_object_put(gobj); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: @@ -1240,31 +1520,31 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; - unsigned long bo_size = mem->bo->tbo.mem.size; - struct kfd_bo_va_list *entry, *tmp; + unsigned long bo_size = mem->bo->tbo.base.size; + struct kfd_mem_attachment *entry, *tmp; struct bo_vm_reservation_context ctx; struct ttm_validate_buffer *bo_list_entry; + unsigned int mapped_to_gpu_memory; int ret; + bool is_imported = false; mutex_lock(&mem->lock); - - if (mem->mapped_to_gpu_memory > 0) { - pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", - mem->va, bo_size); - mutex_unlock(&mem->lock); - return -EBUSY; - } - + mapped_to_gpu_memory = mem->mapped_to_gpu_memory; + is_imported = mem->is_imported; mutex_unlock(&mem->lock); /* lock is not needed after this, since mem is unused and will * be freed anyway */ - /* No more MMU notifiers */ - amdgpu_mn_unregister(mem->bo); + if (mapped_to_gpu_memory > 0) { + pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n", + mem->va, bo_size); + return -EBUSY; + } /* Make sure restore workers don't access the BO any more */ bo_list_entry = &mem->validate_list; @@ -1272,6 +1552,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( list_del(&bo_list_entry->head); mutex_unlock(&process_info->lock); + /* No more MMU notifiers */ + amdgpu_mn_unregister(mem->bo); + ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) return ret; @@ -1285,13 +1568,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); - /* Remove from VM internal data structures */ - list_for_each_entry_safe(entry, tmp, 
&mem->bo_va_list, bo_list) - remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev, - entry, bo_size); - ret = unreserve_bo_and_vms(&ctx, false, false); + /* Remove from VM internal data structures */ + list_for_each_entry_safe(entry, tmp, &mem->attachments, list) + kfd_mem_detach(entry); + /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1303,8 +1585,22 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( kfree(mem->bo->tbo.sg); } + /* Update the size of the BO being freed if it was allocated from + * VRAM and is not imported. + */ + if (size) { + if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) && + (!is_imported)) + *size = bo_size; + else + *size = 0; + } + /* Free the BO*/ - amdgpu_bo_unref(&mem->bo); + drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); + if (mem->dmabuf) + dma_buf_put(mem->dmabuf); + drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); kfree(mem); @@ -1312,17 +1608,16 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, + void *drm_priv, bool *table_freed) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; uint32_t domain; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; - struct kfd_bo_va_list *bo_va_entry = NULL; - struct kfd_bo_va_list *bo_va_entry_aql = NULL; unsigned long bo_size; bool is_invalid_userptr = false; @@ -1343,22 +1638,28 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( * concurrently and the queues are actually stopped */ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { - down_write(¤t->mm->mmap_sem); + mmap_write_lock(current->mm); is_invalid_userptr = atomic_read(&mem->invalid); - up_write(¤t->mm->mmap_sem); + mmap_write_unlock(current->mm); } mutex_lock(&mem->lock); domain = mem->domain; - bo_size = bo->tbo.mem.size; + bo_size = bo->tbo.base.size; pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm, domain_string(domain)); + avm, domain_string(domain)); - ret = reserve_bo_and_vm(mem, vm, &ctx); + if (!kfd_mem_is_attached(avm, mem)) { + ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); + if (ret) + goto out; + } + + ret = reserve_bo_and_vm(mem, avm, &ctx); if (unlikely(ret)) goto out; @@ -1368,25 +1669,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( * the next restore worker */ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && - bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + bo->tbo.resource->mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; - if (check_if_add_bo_to_vm(avm, mem)) { - ret = add_bo_to_vm(adev, mem, avm, false, - &bo_va_entry); - if (ret) - goto add_bo_to_vm_failed; - if (mem->aql_queue) { - ret = add_bo_to_vm(adev, mem, avm, - true, &bo_va_entry_aql); - if (ret) - goto add_bo_to_vm_failed_aql; - } - } else { - ret = vm_validate_pt_pd_bos(avm); - if (unlikely(ret)) - goto add_bo_to_vm_failed; - } + ret = vm_validate_pt_pd_bos(avm); + if (unlikely(ret)) + goto out_unreserve; if (mem->mapped_to_gpu_memory == 0 && !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { @@ -1397,51 +1685,51 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( ret = amdgpu_amdkfd_bo_validate(bo, domain, true); if (ret) { pr_debug("Validate failed\n"); - goto map_bo_to_gpuvm_failed; + goto out_unreserve; } } - list_for_each_entry(entry, &mem->bo_va_list, 
bo_list) { - if (entry->bo_va->base.vm == vm && !entry->is_mapped) { - pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", - entry->va, entry->va + bo_size, - entry); + list_for_each_entry(entry, &mem->attachments, list) { + if (entry->bo_va->base.vm != avm || entry->is_mapped) + continue; - ret = map_bo_to_gpuvm(adev, entry, ctx.sync, - is_invalid_userptr); - if (ret) { - pr_err("Failed to map bo to gpuvm\n"); - goto map_bo_to_gpuvm_failed; - } + pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", + entry->va, entry->va + bo_size, entry); - ret = vm_update_pds(vm, ctx.sync); - if (ret) { - pr_err("Failed to update page directories\n"); - goto map_bo_to_gpuvm_failed; - } + ret = map_bo_to_gpuvm(mem, entry, ctx.sync, + is_invalid_userptr, table_freed); + if (ret) { + pr_err("Failed to map bo to gpuvm\n"); + goto out_unreserve; + } - entry->is_mapped = true; - mem->mapped_to_gpu_memory++; - pr_debug("\t INC mapping count %d\n", - mem->mapped_to_gpu_memory); + ret = vm_update_pds(avm, ctx.sync); + if (ret) { + pr_err("Failed to update page directories\n"); + goto out_unreserve; } + + entry->is_mapped = true; + mem->mapped_to_gpu_memory++; + pr_debug("\t INC mapping count %d\n", + mem->mapped_to_gpu_memory); } - if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count) + if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) amdgpu_bo_fence(bo, &avm->process_info->eviction_fence->base, true); ret = unreserve_bo_and_vms(&ctx, false, false); + /* Apply the no-TLB-flush optimization only on Aldebaran, to + * work around regressions on other ASICs. + */ + if (table_freed && (adev->asic_type != CHIP_ALDEBARAN)) + *table_freed = true; + goto out; -map_bo_to_gpuvm_failed: - if (bo_va_entry_aql) - remove_bo_from_vm(adev, bo_va_entry_aql, bo_size); -add_bo_to_vm_failed_aql: - if (bo_va_entry) - remove_bo_from_vm(adev, bo_va_entry, bo_size); -add_bo_to_vm_failed: +out_unreserve: unreserve_bo_and_vms(&ctx, false, false); out: mutex_unlock(&mem->process_info->lock); @@ -1450,19 +1738,18 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdkfd_process_info *process_info = - ((struct amdgpu_vm *)vm)->process_info; - unsigned long bo_size = mem->bo->tbo.mem.size; - struct kfd_bo_va_list *entry; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdkfd_process_info *process_info = avm->process_info; + unsigned long bo_size = mem->bo->tbo.base.size; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; int ret; mutex_lock(&mem->lock); - ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx); + ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); if (unlikely(ret)) goto out; /* If no VMs were reserved, it means the BO wasn't actually mapped */ @@ -1471,42 +1758,36 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( goto unreserve_out; } - ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm); + ret = vm_validate_pt_pd_bos(avm); if (unlikely(ret)) goto unreserve_out; pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm); - - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == vm && entry->is_mapped) { - pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", - entry->va, - entry->va + bo_size, - entry); - - ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); - if (ret == 0) { - entry->is_mapped = false; - }
else { - pr_err("failed to unmap VA 0x%llx\n", - mem->va); - goto unreserve_out; - } + avm); - mem->mapped_to_gpu_memory--; - pr_debug("\t DEC mapping count %d\n", - mem->mapped_to_gpu_memory); - } + list_for_each_entry(entry, &mem->attachments, list) { + if (entry->bo_va->base.vm != avm || !entry->is_mapped) + continue; + + pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", + entry->va, entry->va + bo_size, entry); + + unmap_bo_from_gpuvm(mem, entry, ctx.sync); + entry->is_mapped = false; + + mem->mapped_to_gpu_memory--; + pr_debug("\t DEC mapping count %d\n", + mem->mapped_to_gpu_memory); } /* If BO is unmapped from all VMs, unfence it. It can be evicted if * required. */ if (mem->mapped_to_gpu_memory == 0 && - !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count) + !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && + !mem->bo->tbo.pin_count) amdgpu_amdkfd_remove_eviction_fence(mem->bo, process_info->eviction_fence); @@ -1590,6 +1871,16 @@ bo_reserve_failed: return ret; } +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem) +{ + struct amdgpu_bo *bo = mem->bo; + + amdgpu_bo_reserve(bo, true); + amdgpu_bo_kunmap(bo); + amdgpu_bo_unpin(bo); + amdgpu_bo_unreserve(bo); +} + int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, struct kfd_vm_fault_info *mem) { @@ -1606,21 +1897,22 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct dma_buf *dma_buf, - uint64_t va, void *vm, + uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + int ret; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* Can't handle non-graphics buffers */ return -EINVAL; obj = dma_buf->priv; - if (obj->dev->dev_private != adev) + if (drm_to_adev(obj->dev) != adev) /* Can't handle buffers from other devices */ return -EINVAL; @@ -1634,20 +1926,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, if (!*mem) return -ENOMEM; + ret = drm_vma_node_allow(&obj->vma_node, drm_priv); + if (ret) { + kfree(mem); + return ret; + } + if (size) *size = amdgpu_bo_size(bo); if (mmap_offset) *mmap_offset = amdgpu_bo_mmap_offset(bo); - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); + (*mem)->alloc_flags = ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? - ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | - ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; + KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) + | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; - (*mem)->bo = amdgpu_bo_ref(bo); + drm_gem_object_get(&bo->tbo.base); + (*mem)->bo = bo; (*mem)->va = va; (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; @@ -1655,6 +1956,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, (*mem)->process_info = avm->process_info; add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); amdgpu_sync_create(&(*mem)->sync); + (*mem)->is_imported = true; return 0; } @@ -1674,10 +1976,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) { struct amdkfd_process_info *process_info = mem->process_info; - int invalid, evicted_bos; + int evicted_bos; int r = 0; - invalid = atomic_inc_return(&mem->invalid); + atomic_inc(&mem->invalid); evicted_bos = atomic_inc_return(&process_info->evicted_bos); if (evicted_bos == 1) { /* First eviction, stop the queues */ @@ -1749,19 +2051,26 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Get updated user pages */ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); if (ret) { - pr_debug("%s: Failed to get user pages: %d\n", - __func__, ret); + pr_debug("Failed %d to get user pages\n", ret); + + /* Return -EFAULT bad address error as success. It will + * fail later with a VM fault if the GPU tries to access + * it. Better than hanging indefinitely with stalled + * user mode queues. + * + * Return other error -EBUSY or -ENOMEM to retry restore + */ + if (ret != -EFAULT) + return ret; + } else { - /* Return error -EBUSY or -ENOMEM, retry restore */ - return ret; + /* + * FIXME: Cannot ignore the return code, must hold + * notifier_lock + */ + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); } - /* - * FIXME: Cannot ignore the return code, must hold - * notifier_lock - */ - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); - /* Mark the BO as valid unless it was invalidated * again concurrently. */ @@ -1833,7 +2142,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, validate_list.head) { - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; bo = mem->bo; @@ -1856,13 +2165,12 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) * VM faults if the GPU tries to access the invalid * memory. */ - list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { - if (!bo_va_entry->is_mapped) + list_for_each_entry(attachment, &mem->attachments, list) { + if (!attachment->is_mapped) continue; - ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, &sync); + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync, NULL); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -1987,6 +2295,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) int ret = 0, i; struct list_head duplicate_save; struct amdgpu_sync sync_obj; + unsigned long failed_size = 0; + unsigned long total_size = 0; INIT_LIST_HEAD(&duplicate_save); INIT_LIST_HEAD(&ctx.list); @@ -2041,24 +2351,32 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; + + total_size += amdgpu_bo_size(bo); ret = amdgpu_amdkfd_bo_validate(bo, domain, false); if (ret) { - pr_debug("Memory eviction: Validate BOs failed. 
Try again\n"); - goto validate_map_fail; + pr_debug("Memory eviction: Validate BOs failed\n"); + failed_size += amdgpu_bo_size(bo); + ret = amdgpu_amdkfd_bo_validate(bo, + AMDGPU_GEM_DOMAIN_GTT, false); + if (ret) { + pr_debug("Memory eviction: Try again\n"); + goto validate_map_fail; + } } - ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); + ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving); if (ret) { pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); goto validate_map_fail; } - list_for_each_entry(bo_va_entry, &mem->bo_va_list, - bo_list) { - ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, - &sync_obj); + list_for_each_entry(attachment, &mem->attachments, list) { + if (!attachment->is_mapped) + continue; + + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); goto validate_map_fail; @@ -2066,6 +2384,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) } } + if (failed_size) + pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); + /* Update page directories */ ret = process_update_pds(process_info, &sync_obj); if (ret) { @@ -2082,7 +2403,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) */ new_fence = amdgpu_amdkfd_fence_create( process_info->eviction_fence->base.context, - process_info->eviction_fence->mm); + process_info->eviction_fence->mm, + NULL); if (!new_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; @@ -2101,7 +2423,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *bo = peer_vm->root.base.bo; + struct amdgpu_bo *bo = peer_vm->root.bo; amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); } @@ -2129,6 +2451,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); + INIT_LIST_HEAD(&(*mem)->attachments); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; @@ -2203,3 +2526,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) kfree(mem); return 0; } + +/* Returns GPU-specific tiling mode information */ +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + /* Those values are not set from GFX9 onwards */ + config->num_banks = adev->gfx.config.num_banks; + config->num_ranks = adev->gfx.config.num_ranks; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 72232fccf61a..96b7bb13a2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev) if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); - adev->i2c_bus[i] = 
amdgpu_i2c_create(adev->ddev, &i2c, stmp); + adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp); } gpio = (ATOM_GPIO_I2C_ASSIGMENT *) ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); @@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { - uint8_t con_obj_id, con_obj_num, con_obj_type; - - con_obj_id = + uint8_t con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - con_obj_num = - (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) - >> ENUM_ID_SHIFT; - con_obj_type = - (le16_to_cpu(path->usConnObjectId) & - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* Skip TV/CV support */ if ((le16_to_cpu(path->usDeviceTag) == @@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { - uint8_t grph_obj_id, grph_obj_num, grph_obj_type; - - grph_obj_id = - (le16_to_cpu(path->usGraphicObjIds[j]) & - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - grph_obj_num = - (le16_to_cpu(path->usGraphicObjIds[j]) & - ENUM_ID_MASK) >> ENUM_ID_SHIFT; - grph_obj_type = + uint8_t grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; @@ -557,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * } } - amdgpu_link_encoder_connector(adev->ddev); + amdgpu_link_encoder_connector(adev_to_drm(adev)); return true; } @@ -1248,157 +1232,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device * return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); } -int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, - u16 *leakage_id) -{ - union set_voltage args; - int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); - u8 frev, crev; - - if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) - return -EINVAL; - - switch (crev) { - case 3: - case 4: - args.v3.ucVoltageType = 0; - args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID; - args.v3.usVoltageLevel = 0; - - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); - - *leakage_id = le16_to_cpu(args.v3.usVoltageLevel); - break; - default: - DRM_ERROR("Unknown table version %d, %d\n", frev, crev); - return -EINVAL; - } - - return 0; -} - -int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev, - u16 *vddc, u16 *vddci, - u16 virtual_voltage_id, - u16 vbios_voltage_id) -{ - int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); - u8 frev, crev; - u16 data_offset, size; - int i, j; - ATOM_ASIC_PROFILING_INFO_V2_1 *profile; - u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; - - *vddc = 0; - *vddci = 0; - - if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) - return -EINVAL; - - profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) - (adev->mode_info.atom_context->bios + data_offset); - - switch (frev) { - case 1: - return -EINVAL; - case 2: - switch (crev) { - case 1: - if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1)) - return -EINVAL; - leakage_bin = (u16 *) - (adev->mode_info.atom_context->bios + data_offset + - le16_to_cpu(profile->usLeakageBinArrayOffset)); - vddc_id_buf = (u16 *) - (adev->mode_info.atom_context->bios + data_offset + - 
le16_to_cpu(profile->usElbVDDC_IdArrayOffset)); - vddc_buf = (u16 *) - (adev->mode_info.atom_context->bios + data_offset + - le16_to_cpu(profile->usElbVDDC_LevelArrayOffset)); - vddci_id_buf = (u16 *) - (adev->mode_info.atom_context->bios + data_offset + - le16_to_cpu(profile->usElbVDDCI_IdArrayOffset)); - vddci_buf = (u16 *) - (adev->mode_info.atom_context->bios + data_offset + - le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset)); - - if (profile->ucElbVDDC_Num > 0) { - for (i = 0; i < profile->ucElbVDDC_Num; i++) { - if (vddc_id_buf[i] == virtual_voltage_id) { - for (j = 0; j < profile->ucLeakageBinNum; j++) { - if (vbios_voltage_id <= leakage_bin[j]) { - *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; - break; - } - } - break; - } - } - } - if (profile->ucElbVDDCI_Num > 0) { - for (i = 0; i < profile->ucElbVDDCI_Num; i++) { - if (vddci_id_buf[i] == virtual_voltage_id) { - for (j = 0; j < profile->ucLeakageBinNum; j++) { - if (vbios_voltage_id <= leakage_bin[j]) { - *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; - break; - } - } - break; - } - } - } - break; - default: - DRM_ERROR("Unknown table version %d, %d\n", frev, crev); - return -EINVAL; - } - break; - default: - DRM_ERROR("Unknown table version %d, %d\n", frev, crev); - return -EINVAL; - } - - return 0; -} - -union get_voltage_info { - struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in; - struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out; -}; - -int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev, - u16 virtual_voltage_id, - u16 *voltage) -{ - int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo); - u32 entry_id; - u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; - union get_voltage_info args; - - for (entry_id = 0; entry_id < count; entry_id++) { - if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v == - virtual_voltage_id) - break; - } - - if (entry_id >= count) - return -EINVAL; - - args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; - args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; - args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); - args.in.ulSCLKFreq = - cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); - - amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); - - *voltage = le16_to_cpu(args.evv_out.usVoltageLevel); - - return 0; -} - union voltage_object_info { struct _ATOM_VOLTAGE_OBJECT_INFO v1; struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; @@ -1417,7 +1250,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL { u32 size = le16_to_cpu(v3->sHeader.usStructureSize); u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); - u8 *start = (u8*)v3; + u8 *start = (u8 *)v3; while (offset < size) { ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); @@ -1802,9 +1635,9 @@ static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev) (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { /* Firmware request VRAM reservation for SR-IOV */ - adev->fw_vram_usage.start_offset = (start_addr & + adev->mman.fw_vram_usage_start_offset = (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; - adev->fw_vram_usage.size = size << 10; + adev->mman.fw_vram_usage_size = size << 10; /* Use the default scratch size */ usage_bytes = 0; } else { @@ -1898,7 +1731,7 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) */ static void cail_reg_write(struct card_info *info, 
uint32_t reg, uint32_t val) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); WREG32(reg, val); } @@ -1914,61 +1747,36 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) */ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); uint32_t r; r = RREG32(reg); return r; } -/** - * cail_ioreg_write - write IO register - * - * @info: atom card_info pointer - * @reg: IO register offset - * @val: value to write to the pll register - * - * Provides a IO register accessor for the atom interpreter (r4xx+). - */ -static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) -{ - struct amdgpu_device *adev = info->dev->dev_private; - - WREG32_IO(reg, val); -} - -/** - * cail_ioreg_read - read IO register - * - * @info: atom card_info pointer - * @reg: IO register offset - * - * Provides an IO register accessor for the atom interpreter (r4xx+). - * Returns the value of the IO register. - */ -static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) -{ - struct amdgpu_device *adev = info->dev->dev_private; - uint32_t r; - - r = RREG32_IO(reg); - return r; -} - static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct atom_context *ctx = adev->mode_info.atom_context; - return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); + return sysfs_emit(buf, "%s\n", ctx->vbios_version); } static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, NULL); +static struct attribute *amdgpu_vbios_version_attrs[] = { + &dev_attr_vbios_version.attr, + NULL +}; + +const struct attribute_group amdgpu_vbios_version_attr_group = { + .attrs = amdgpu_vbios_version_attrs +}; + /** * amdgpu_atombios_fini - free the driver info and callbacks for atombios * @@ -1988,7 +1796,6 @@ void amdgpu_atombios_fini(struct amdgpu_device *adev) adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); adev->mode_info.atom_card_info = NULL; - device_remove_file(adev->dev, &dev_attr_vbios_version); } /** @@ -2005,24 +1812,14 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) { struct card_info *atom_card_info = kzalloc(sizeof(struct card_info), GFP_KERNEL); - int ret; if (!atom_card_info) return -ENOMEM; adev->mode_info.atom_card_info = atom_card_info; - atom_card_info->dev = adev->ddev; + atom_card_info->dev = adev_to_drm(adev); atom_card_info->reg_read = cail_reg_read; atom_card_info->reg_write = cail_reg_write; - /* needed for iio ops */ - if (adev->rio_mem) { - atom_card_info->ioreg_read = cail_ioreg_read; - atom_card_info->ioreg_write = cail_ioreg_write; - } else { - DRM_DEBUG("PCI I/O BAR is not found. 
Using MMIO to access ATOM BIOS\n"); - atom_card_info->ioreg_read = cail_reg_read; - atom_card_info->ioreg_write = cail_reg_write; - } atom_card_info->mc_read = cail_mc_read; atom_card_info->mc_write = cail_mc_write; atom_card_info->pll_read = cail_pll_read; @@ -2038,22 +1835,31 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); - ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); - if (ret) { - DRM_ERROR("Failed to get mem train fb location.\n"); - return ret; - } + /* cached firmware_flags for further usage */ + adev->mode_info.firmware_flags = + amdgpu_atomfirmware_query_firmware_capability(adev); } else { amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); } - ret = device_create_file(adev->dev, &dev_attr_vbios_version); - if (ret) { - DRM_ERROR("Failed to create device file for VBIOS version\n"); - return ret; - } - return 0; } +int amdgpu_atombios_get_data_table(struct amdgpu_device *adev, + uint32_t table, + uint16_t *size, + uint8_t *frev, + uint8_t *crev, + uint8_t **addr) +{ + uint16_t data_start; + + if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table, + size, frev, crev, &data_start)) + return -EINVAL; + + *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index fd8f18074f7a..8cc0222dba19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -168,18 +168,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, u32 eng_clock, u32 mem_clock); -int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev, - u16 *leakage_id); - -int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev, - u16 *vddc, u16 *vddci, - u16 virtual_voltage_id, - u16 vbios_voltage_id); - -int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev, - u16 virtual_voltage_id, - u16 *voltage); - bool amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, u8 voltage_type, u8 voltage_mode); @@ -216,6 +204,13 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, u8 voltage_type, u8 *svd_gpio_id, u8 *svc_gpio_id); +int amdgpu_atombios_get_data_table(struct amdgpu_device *adev, + uint32_t table, + uint16_t *size, + uint8_t *frev, + uint8_t *crev, + uint8_t **addr); + void amdgpu_atombios_fini(struct amdgpu_device *adev); int amdgpu_atombios_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index ff4eb96bdfb5..97178b307ed6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -29,23 +29,59 @@ #include "atombios.h" #include "soc15_hw_ip.h" -bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) +union firmware_info { + struct atom_firmware_info_v3_1 v31; + struct atom_firmware_info_v3_2 v32; + struct atom_firmware_info_v3_3 v33; + struct atom_firmware_info_v3_4 v34; +}; + +/* + * Helper function to query firmware capability + * + * @adev: amdgpu_device pointer + * + * Return firmware_capability in firmwareinfo table on success or 0 if not + */ +uint32_t amdgpu_atomfirmware_query_firmware_capability(struct 
amdgpu_device *adev) { - int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - uint16_t data_offset; + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index; + u16 data_offset, size; + union firmware_info *firmware_info; + u8 frev, crev; + u32 fw_cap = 0; - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, - NULL, NULL, &data_offset)) { - struct atom_firmware_info_v3_1 *firmware_info = - (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + - data_offset); + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); - if (le32_to_cpu(firmware_info->firmware_capability) & - ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) - return true; + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, + index, &size, &frev, &crev, &data_offset)) { + /* support firmware_info 3.1 + */ + if ((frev == 3 && crev >=1) || (frev > 3)) { + firmware_info = (union firmware_info *) + (mode_info->atom_context->bios + data_offset); + fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability); + } } - return false; + + return fw_cap; +} + +/* + * Helper function to query gpu virtualization capability + * + * @adev: amdgpu_device pointer + * + * Return true if gpu virtualization is supported or false if not + */ +bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev) +{ + u32 fw_cap; + + fw_cap = adev->mode_info.firmware_flags; + + return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false; } void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) @@ -70,7 +106,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) struct atom_context *ctx = adev->mode_info.atom_context; int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_usagebyfirmware); - struct vram_usagebyfirmware_v2_1 * firmware_usage; + struct vram_usagebyfirmware_v2_1 *firmware_usage; uint32_t start_addr, size; uint16_t data_offset; int usage_bytes = 0; @@ -89,9 +125,9 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { /* Firmware request VRAM reservation for SR-IOV */ - adev->fw_vram_usage.start_offset = (start_addr & + adev->mman.fw_vram_usage_start_offset = (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; - adev->fw_vram_usage.size = size << 10; + adev->mman.fw_vram_usage_size = size << 10; /* Use the default scratch size */ usage_bytes = 0; } else { @@ -111,20 +147,27 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) union igp_info { struct atom_integrated_system_info_v1_11 v11; + struct atom_integrated_system_info_v1_12 v12; + struct atom_integrated_system_info_v2_1 v21; }; union umc_info { struct atom_umc_info_v3_1 v31; + struct atom_umc_info_v3_2 v32; + struct atom_umc_info_v3_3 v33; }; union vram_info { struct atom_vram_info_header_v2_3 v23; struct atom_vram_info_header_v2_4 v24; + struct atom_vram_info_header_v2_5 v25; + struct atom_vram_info_header_v2_6 v26; }; union vram_module { struct atom_vram_module_v9 v9; struct atom_vram_module_v10 v10; + struct atom_vram_module_v11 v11; }; static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, @@ -146,6 +189,10 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, case LpDdr4MemType: vram_type = AMDGPU_VRAM_TYPE_DDR4; break; + case Ddr5MemType: + case LpDdr5MemType: + vram_type = 
AMDGPU_VRAM_TYPE_DDR5; + break; default: vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; break; @@ -156,6 +203,7 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, vram_type = AMDGPU_VRAM_TYPE_GDDR5; break; case ATOM_DGPU_VRAM_TYPE_HBM2: + case ATOM_DGPU_VRAM_TYPE_HBM2E: vram_type = AMDGPU_VRAM_TYPE_HBM; break; case ATOM_DGPU_VRAM_TYPE_GDDR6: @@ -202,15 +250,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, if (adev->flags & AMD_IS_APU) { igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 11: - mem_channel_number = igp_info->v11.umachannelnumber; - /* channel width is 64 */ - if (vram_width) - *vram_width = mem_channel_number * 64; - mem_type = igp_info->v11.memorytype; - if (vram_type) - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + switch (frev) { + case 1: + switch (crev) { + case 11: + case 12: + mem_channel_number = igp_info->v11.umachannelnumber; + if (!mem_channel_number) + mem_channel_number = 1; + /* channel width is 64 */ + if (vram_width) + *vram_width = mem_channel_number * 64; + mem_type = igp_info->v11.memorytype; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; + default: + return -EINVAL; + } + break; + case 2: + switch (crev) { + case 1: + case 2: + mem_channel_number = igp_info->v21.umachannelnumber; + if (!mem_channel_number) + mem_channel_number = 1; + /* channel width is 64 */ + if (vram_width) + *vram_width = mem_channel_number * 64; + mem_type = igp_info->v21.memorytype; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -260,6 +335,46 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, if (vram_vendor) *vram_vendor = mem_vendor; break; + case 5: + if (module_id > vram_info->v25.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v25.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v11.vram_module_size); + i++; + } + mem_type = vram_module->v11.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v11.channel_num; + mem_channel_width = vram_module->v11.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v11.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; + case 6: + if (module_id > vram_info->v26.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v26.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v9.vram_module_size); + i++; + } + mem_type = vram_module->v9.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v9.channel_num; + mem_channel_width = vram_module->v9.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; default: return -EINVAL; } @@ -282,59 +397,133 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev) union umc_info *umc_info; u8 frev, crev; bool ecc_default_enabled = false; + u8 umc_config; + u32 umc_config1; index = 
get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - /* support umc_info 3.1+ */ - if ((frev == 3 && crev >= 1) || (frev > 3)) { + if (frev == 3) { umc_info = (union umc_info *) (mode_info->atom_context->bios + data_offset); - ecc_default_enabled = - (le32_to_cpu(umc_info->v31.umc_config) & - UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false; + switch (crev) { + case 1: + umc_config = le32_to_cpu(umc_info->v31.umc_config); + ecc_default_enabled = + (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false; + break; + case 2: + umc_config = le32_to_cpu(umc_info->v32.umc_config); + ecc_default_enabled = + (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false; + break; + case 3: + umc_config = le32_to_cpu(umc_info->v33.umc_config); + umc_config1 = le32_to_cpu(umc_info->v33.umc_config1); + ecc_default_enabled = + ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) || + (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false; + break; + default: + /* unsupported crev */ + return false; + } } } return ecc_default_enabled; } -union firmware_info { - struct atom_firmware_info_v3_1 v31; -}; - /* + * Helper function to query sram ecc capability + * + * @adev: amdgpu_device pointer + * * Return true if vbios supports sram ecc or false if not */ bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev) { + u32 fw_cap; + + fw_cap = adev->mode_info.firmware_flags; + + return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false; +} + +/* + * Helper function to query dynamic boot config capability + * + * @adev: amdgpu_device pointer + * + * Return true if vbios supports dynamic boot config or false if not + */ +bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev) +{ + u32 fw_cap; + + fw_cap = adev->mode_info.firmware_flags; + + return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false; +} + +/** + * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS + * adev: amdgpu_device pointer + * i2c_address: pointer to u8; if not NULL, will contain + * the RAS EEPROM address if the function returns true + * + * Return true if VBIOS supports RAS EEPROM address reporting, + * else return false. If true and @i2c_address is not NULL, + * will contain the RAS ROM address. + */ +bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, + u8 *i2c_address) +{ struct amdgpu_mode_info *mode_info = &adev->mode_info; int index; u16 data_offset, size; union firmware_info *firmware_info; u8 frev, crev; - bool sram_ecc_supported = false; index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); + firmwareinfo); if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, - index, &size, &frev, &crev, &data_offset)) { - /* support firmware_info 3.1 + */ - if ((frev == 3 && crev >=1) || (frev > 3)) { + index, &size, &frev, &crev, + &data_offset)) { + /* support firmware_info 3.4 + */ + if ((frev == 3 && crev >=4) || (frev > 3)) { firmware_info = (union firmware_info *) (mode_info->atom_context->bios + data_offset); - sram_ecc_supported = - (le32_to_cpu(firmware_info->v31.firmware_capability) & - ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false; + /* The ras_rom_i2c_slave_addr should ideally + * be a 19-bit EEPROM address, which would be + * used as is by the driver; see top of + * amdgpu_eeprom.c. 
+ * + * When this is the case, 0 is of course a + * valid RAS EEPROM address, in which case, + * we'll drop the first "if (firm...)" and only + * leave the check for the pointer. + * + * The reason this works right now is because + * ras_rom_i2c_slave_addr contains the EEPROM + * device type qualifier 1010b in the top 4 + * bits. + */ + if (firmware_info->v34.ras_rom_i2c_slave_addr) { + if (i2c_address) + *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr; + return true; + } } } - return sram_ecc_supported; + return false; } + union smu_info { struct atom_smu_info_v3_1 v31; }; @@ -364,10 +553,6 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) adev->pm.current_sclk = adev->clock.default_sclk; adev->pm.current_mclk = adev->clock.default_mclk; - /* not technically a clock, but... */ - adev->mode_info.firmware_flags = - le32_to_cpu(firmware_info->v31.firmware_capability); - ret = 0; } @@ -417,11 +602,27 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) ret = 0; } + /* if asic is Navi+, the rlc reference clock is used for system clock + * from vbios gfx_info table */ + if (adev->asic_type >= CHIP_NAVI10) { + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + gfx_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + struct atom_gfx_info_v2_2 *gfx_info = (struct atom_gfx_info_v2_2*) + (mode_info->atom_context->bios + data_offset); + if ((frev == 2) && (crev >= 2)) + spll->reference_freq = le32_to_cpu(gfx_info->rlc_gpu_timer_refclk); + ret = 0; + } + } + return ret; } union gfx_info { - struct atom_gfx_info_v2_4 v24; + struct atom_gfx_info_v2_4 v24; + struct atom_gfx_info_v2_7 v27; }; int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) @@ -456,6 +657,22 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu; adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size); return 0; + case 7: + adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines; + adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh; + adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se; + adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se; + adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches; + adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs); + adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds; + adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth; + adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth); + adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer; + adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size); + adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu; + adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size); + return 0; default: return -EINVAL; } @@ -465,136 +682,52 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) } /* - * Check if VBIOS supports GDDR6 training data save/restore + * Helper function to query two stage mem training capability + * + * @adev: amdgpu_device pointer + * + * Return true if two stage mem training is supported or false if not */ -static bool 
gddr6_mem_train_vbios_support(struct amdgpu_device *adev) -{ - uint16_t data_offset; - int index; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, - NULL, NULL, &data_offset)) { - struct atom_firmware_info_v3_1 *firmware_info = - (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + - data_offset); - - DRM_DEBUG("atom firmware capability:0x%08x.\n", - le32_to_cpu(firmware_info->firmware_capability)); - - if (le32_to_cpu(firmware_info->firmware_capability) & - ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) - return true; - } - - return false; -} - -static int gddr6_mem_train_support(struct amdgpu_device *adev) +bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev) { - int ret; - uint32_t major, minor, revision, hw_v; - - if (gddr6_mem_train_vbios_support(adev)) { - amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision); - hw_v = HW_REV(major, minor, revision); - /* - * treat 0 revision as a special case since register for MP0 and MMHUB is missing - * for some Navi10 A0, preventing driver from discovering the hwip information since - * none of the functions will be initialized, it should not cause any problems - */ - switch (hw_v) { - case HW_REV(11, 0, 0): - case HW_REV(11, 0, 5): - ret = 1; - break; - default: - DRM_ERROR("memory training vbios supports but psp hw(%08x)" - " doesn't support!\n", hw_v); - ret = -1; - break; - } - } else { - ret = 0; - hw_v = -1; - } + u32 fw_cap; fw_cap = adev->mode_info.firmware_flags; - DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret); - return ret; + return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false; } -int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) +int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; - unsigned char *bios = ctx->bios; - struct vram_reserve_block *reserved_block; - int index, block_number; - uint8_t frev, crev; - uint16_t data_offset, size; - uint32_t start_address_in_kb; - uint64_t offset; - int ret; + union firmware_info *firmware_info; + int index; + u16 data_offset, size; + u8 frev, crev; + int fw_reserved_fb_size; - adev->fw_vram_usage.mem_train_support = false; + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); - if (adev->asic_type != CHIP_NAVI10 && - adev->asic_type != CHIP_NAVI14) + if (!amdgpu_atom_parse_data_header(ctx, index, &size, + &frev, &crev, &data_offset)) + /* fail to parse data_header */ return 0; - if (amdgpu_sriov_vf(adev)) - return 0; + firmware_info = (union firmware_info *)(ctx->bios + data_offset); - ret = gddr6_mem_train_support(adev); - if (ret == -1) + if (frev !=3) return -EINVAL; - else if (ret == 0) - return 0; - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - vram_usagebyfirmware); - ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, - &data_offset); - if (ret == 0) { - DRM_ERROR("parse data header failed.\n"); - return -EINVAL; + switch (crev) { + case 4: + fw_reserved_fb_size = + (firmware_info->v34.fw_reserved_size_in_kb << 10); + break; + default: + fw_reserved_fb_size = 0; + break; } - DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x," " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset); - /* only support 2.1+ */ - if (((uint16_t)frev << 8 | crev) < 
0x0201) { - DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev); - return -EINVAL; - } - - reserved_block = (struct vram_reserve_block *) - (bios + data_offset + sizeof(struct atom_common_table_header)); - block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) - / sizeof(struct vram_reserve_block); - reserved_block += (block_number > 0) ? block_number-1 : 0; - DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", - block_number, - le32_to_cpu(reserved_block->start_address_in_kb), - le16_to_cpu(reserved_block->used_by_firmware_in_kb), - le16_to_cpu(reserved_block->used_by_driver_in_kb)); - if (reserved_block->used_by_firmware_in_kb > 0) { - start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); - offset = (uint64_t)start_address_in_kb * ONE_KiB; - if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) { - offset -= ONE_MiB; - } - - offset &= ~(ONE_MiB - 1); - adev->fw_vram_usage.mem_train_fb_loc = offset; - adev->fw_vram_usage.mem_train_support = true; - DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); - ret = 0; - } else { - DRM_ERROR("used_by_firmware_in_kb is 0!\n"); - ret = -EINVAL; - } - - return ret; + return fw_reserved_fb_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index f871af5ea6f3..751248b253de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -26,15 +26,19 @@ #define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t)) -bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); +uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, int *vram_width, int *vram_type, int *vram_vendor); -int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address); +bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 3e35a8f2c5e5..7abe9500c0c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -616,7 +616,7 @@ static bool amdgpu_atpx_detect(void) while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + has_atpx |= amdgpu_atpx_pci_probe_handle(pdev); parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; @@ -626,7 +626,7 @@ static bool amdgpu_atpx_detect(void) while ((pdev = 
pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { vga_count++; - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + has_atpx |= amdgpu_atpx_pci_probe_handle(pdev); parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index d1495e1c9289..313517f7cf10 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, - false, false); + false, false, false); if (r) goto exit_do_move; r = dma_fence_wait(fence, false); @@ -85,6 +85,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, bp.flags = 0; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + n = AMDGPU_BENCHMARK_ITERATIONS; r = amdgpu_bo_create(adev, &bp, &sobj); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 50dff69a0f6e..27b19503773b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -97,6 +97,10 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) if (amdgpu_device_need_post(adev)) return false; + /* FB BAR not enabled */ + if (pci_resource_len(adev->pdev, 0) == 0) + return false; + adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); bios = ioremap_wc(vram_base, size); @@ -155,7 +159,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0}; int len; - if (!adev->asic_funcs->read_bios_from_rom) + if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom) return false; /* validate VBIOS signature */ @@ -192,30 +196,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); - adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI @@ -286,7 +295,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } @@ -299,7 +308,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } @@ -311,7 +320,7 @@ static bool amdgpu_atrm_get_bios(struct 
amdgpu_device *adev) adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { - DRM_ERROR("Unable to allocate bios\n"); + dev_err(adev->dev, "Unable to allocate bios\n"); return false; } @@ -343,7 +352,8 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return igp_read_bios_from_vram(adev); else - return amdgpu_asic_read_disabled_bios(adev); + return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ? + false : amdgpu_asic_read_disabled_bios(adev); } #ifdef CONFIG_ACPI @@ -358,7 +368,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) return false; tbl_size = hdr->length; if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { - DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); + dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n"); return false; } @@ -371,13 +381,13 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) offset += sizeof(VFCT_IMAGE_HEADER); if (offset > tbl_size) { - DRM_ERROR("ACPI VFCT image header truncated\n"); + dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n"); return false; } offset += vhdr->ImageLength; if (offset > tbl_size) { - DRM_ERROR("ACPI VFCT image truncated\n"); + dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n"); return false; } @@ -400,7 +410,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) } } - DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n"); return false; } #else @@ -412,28 +422,42 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) bool amdgpu_get_bios(struct amdgpu_device *adev) { - if (amdgpu_atrm_get_bios(adev)) + if (amdgpu_atrm_get_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ATRM\n"); goto success; + } - if (amdgpu_acpi_vfct_bios(adev)) + if (amdgpu_acpi_vfct_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VFCT\n"); goto success; + } - if (igp_read_bios_from_vram(adev)) + if (igp_read_bios_from_vram(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); goto success; + } - if (amdgpu_read_bios(adev)) + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); goto success; + } - if (amdgpu_read_bios_from_rom(adev)) + if (amdgpu_read_bios_from_rom(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM\n"); goto success; + } - if (amdgpu_read_disabled_bios(adev)) + if (amdgpu_read_disabled_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n"); goto success; + } - if (amdgpu_read_platform_bios(adev)) + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); goto success; + } - DRM_ERROR("Unable to locate a BIOS ROM\n"); + dev_err(adev->dev, "Unable to locate a BIOS ROM\n"); return false; success: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 85b0515c0fdc..714178f1b6c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref) int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_bo_list_entry *info, - unsigned num_entries, struct amdgpu_bo_list **result) + size_t num_entries, struct amdgpu_bo_list **result) { unsigned last_entry = 0, first_userptr = num_entries; struct amdgpu_bo_list_entry *array; @@ -102,7 +102,7 @@ int 
amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, } bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm) { @@ -265,7 +265,7 @@ error_free: int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a130e766cbdb..044b41f0bfd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,6 +34,7 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; + struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; @@ -60,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_bo_list_entry *info, - unsigned num_entries, + size_t num_entries, struct amdgpu_bo_list **list); static inline struct amdgpu_bo_list_entry * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 031b094607bd..f1a050379190 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, { CGS_FUNC_ADEV; switch (space) { - case CGS_IND_REG__MMIO: - return RREG32_IDX(index); case CGS_IND_REG__PCIE: return RREG32_PCIE(index); case CGS_IND_REG__SMC: @@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; + default: + BUG(); } WARN(1, "Invalid indirect register space"); return 0; @@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, { CGS_FUNC_ADEV; switch (space) { - case CGS_IND_REG__MMIO: - return WREG32_IDX(index, value); case CGS_IND_REG__PCIE: return WREG32_PCIE(index, value); case CGS_IND_REG__SMC: @@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; + default: + BUG(); } WARN(1, "Invalid indirect register space"); } @@ -352,17 +352,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, break; case CHIP_POLARIS11: if (type == CGS_UCODE_ID_SMU) { - if (((adev->pdev->device == 0x67ef) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe5))) || - ((adev->pdev->device == 0x67ff) && - ((adev->pdev->revision == 0xcf) || - (adev->pdev->revision == 0xef) || - (adev->pdev->revision == 0xff)))) { + if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); - } else if ((adev->pdev->device == 0x67ef) && - (adev->pdev->revision == 0xe2)) { + } else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin"); } else { @@ -374,21 +367,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, 
break; case CHIP_POLARIS10: if (type == CGS_UCODE_ID_SMU) { - if (((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe3) || - (adev->pdev->revision == 0xe4) || - (adev->pdev->revision == 0xe5) || - (adev->pdev->revision == 0xe7) || - (adev->pdev->revision == 0xef))) || - ((adev->pdev->device == 0x6fdf) && - ((adev->pdev->revision == 0xef) || - (adev->pdev->revision == 0xff)))) { + if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); - } else if ((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe1) || - (adev->pdev->revision == 0xf7))) { + } else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin"); } else { @@ -399,13 +381,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, } break; case CHIP_POLARIS12: - if (((adev->pdev->device == 0x6987) && - ((adev->pdev->revision == 0xc0) || - (adev->pdev->revision == 0xc3))) || - ((adev->pdev->device == 0x6981) && - ((adev->pdev->revision == 0x00) || - (adev->pdev->revision == 0x01) || - (adev->pdev->revision == 0x10)))) { + if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris12_k_smc.bin"); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index a62cbc8199de..b9c11c2b2885 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -26,6 +26,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_dp_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -41,7 +42,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); /* bail if the connector does not have hpd pin, e.g., @@ -279,7 +280,7 @@ amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev) static void amdgpu_connector_get_edid(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->edid) @@ -463,7 +464,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector, uint64_t val) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; @@ -716,8 +717,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -832,7 +835,7 @@ static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* XXX check mode bandwidth */ @@ -854,8 +857,10 @@ 
amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = amdgpu_connector_best_single_encoder(connector); @@ -937,7 +942,7 @@ static bool amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); enum drm_connector_status status; @@ -968,7 +973,7 @@ static enum drm_connector_status amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); const struct drm_encoder_helper_funcs *encoder_funcs; int r; @@ -977,8 +982,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1153,7 +1160,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); /* XXX check mode bandwidth */ @@ -1305,7 +1312,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector) bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if ((adev->clock.default_dispclk >= 53900) && amdgpu_connector_encoder_is_hbr2(connector)) { @@ -1319,7 +1326,7 @@ static enum drm_connector_status amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); enum drm_connector_status ret = connector_status_disconnected; struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv; @@ -1328,8 +1335,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1405,6 +1414,12 @@ out: pm_runtime_put_autosuspend(connector->dev->dev); } + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + drm_dp_set_subconnector_property(&amdgpu_connector->base, + ret, + amdgpu_dig_connector->dpcd, + amdgpu_dig_connector->downstream_ports); return ret; } @@ -1461,6 +1476,20 @@ static enum drm_mode_status 
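Every connector ->detect() hunk above carries the same one-line fix: pm_runtime_get_sync() raises the device usage count even when it returns an error, so bailing out with a bare return leaks the reference and can keep the GPU from ever runtime-suspending again. The corrected pattern, repeated here and in the debugfs hunks further down:

r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
	/* the usage count was bumped even though get_sync failed; drop it */
	pm_runtime_put_autosuspend(connector->dev->dev);
	return connector_status_disconnected;
}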
amdgpu_connector_dp_mode_valid(struct drm_connector return MODE_OK; } +static int +amdgpu_connector_late_register(struct drm_connector *connector) +{ + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + int r = 0; + + if (amdgpu_connector->ddc_bus->has_aux) { + amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev; + r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux); + } + + return r; +} + static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = { .get_modes = amdgpu_connector_dp_get_modes, .mode_valid = amdgpu_connector_dp_mode_valid, @@ -1475,6 +1504,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = { .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, + .late_register = amdgpu_connector_late_register, }; static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { @@ -1485,6 +1515,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .early_unregister = amdgpu_connector_unregister, .destroy = amdgpu_connector_destroy, .force = amdgpu_connector_dvi_force, + .late_register = amdgpu_connector_late_register, }; void @@ -1497,7 +1528,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, struct amdgpu_hpd *hpd, struct amdgpu_router *router) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; @@ -1931,11 +1962,15 @@ amdgpu_connector_add(struct amdgpu_device *adev, connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; - drm_connector_register(connector); if (has_aux) amdgpu_atombios_dp_aux_init(amdgpu_connector); + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { + drm_connector_attach_dp_subconnector_property(&amdgpu_connector->base); + } + return; failed: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5ca905b4a0fb..0311d799a010 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -28,6 +28,7 @@ #include <linux/file.h> #include <linux/pagemap.h> #include <linux/sync_file.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> #include <drm/drm_syncobj.h> @@ -56,7 +57,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, /* One for TTM and one for the CS job */ p->uf_entry.tv.num_shared = 2; - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); size = amdgpu_bo_size(bo); if (size != PAGE_SIZE || (data->offset + 8) > size) { @@ -97,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, return 0; error_free: - if (info) - kvfree(info); + kvfree(info); return r; } @@ -117,7 +117,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs if (cs->in.num_chunks == 0) return 0; - chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); + chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); if (!chunk_array) return -ENOMEM; @@ -144,7 +144,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs } p->nchunks = cs->in.num_chunks; - p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), + p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), GFP_KERNEL); if (!p->chunks) { ret = -ENOMEM; 
@@ -238,7 +238,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs if (p->uf_entry.tv.bo) p->job->uf_addr = uf_offset; - kfree(chunk_array); + kvfree(chunk_array); /* Use this opportunity to fill in task info for the vm */ amdgpu_vm_set_task_info(vm); @@ -250,11 +250,11 @@ free_all_kdata: free_partial_kdata: for (; i >= 0; i--) kvfree(p->chunks[i].kdata); - kfree(p->chunks); + kvfree(p->chunks); p->chunks = NULL; p->nchunks = 0; free_chunk: - kfree(chunk_array); + kvfree(chunk_array); return ret; } @@ -298,7 +298,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. * @@ -315,7 +315,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, } total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); - used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + used_vram = amdgpu_vram_mgr_usage(vram_man); free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); @@ -325,7 +325,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, increment_us = time_us - adev->mm_stats.last_update_us; adev->mm_stats.last_update_us = time_us; adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us, - us_upper_bound); + us_upper_bound); /* This prevents the short period of low performance when the VRAM * usage is low and the driver is in debt or doesn't have enough @@ -362,7 +362,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { u64 total_vis_vram = adev->gmc.visible_vram_size; u64 used_vis_vram = - amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + amdgpu_vram_mgr_vis_usage(vram_man); if (used_vis_vram < total_vis_vram) { u64 free_vis_vram = total_vis_vram - used_vis_vram; @@ -396,26 +396,27 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, spin_unlock(&adev->mm_stats.lock); } -static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, - struct amdgpu_bo *bo) +static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_cs_parser *p = param; struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, - .resv = bo->tbo.base.resv, - .flags = 0 + .resv = bo->tbo.base.resv }; uint32_t domain; int r; - if (bo->pin_count) + if (bo->tbo.pin_count) return 0; /* Don't move this buffer if we have depleted our allowance * to move it. Don't move anything if the threshold is zero. 
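The parser hunks above switch the chunk arrays from kmalloc_array()/kfree() to kvmalloc_array()/kvfree(): cs->in.num_chunks is userspace-controlled, and kvmalloc falls back to vmalloc when the request is too large for physically contiguous pages. The invariant the rest of the hunk maintains is simply that both ends of each allocation match:

chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
	return -ENOMEM;
/* ... */
kvfree(chunk_array);	/* kvfree() copes with kmalloc and vmalloc memory alike */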
*/ - if (p->bytes_moved < p->bytes_moved_threshold) { + if (p->bytes_moved < p->bytes_moved_threshold && + (!bo->tbo.base.dma_buf || + list_empty(&bo->tbo.base.dma_buf->attachments))) { if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { /* And don't move a CPU_ACCESS_REQUIRED BO to limited @@ -450,21 +451,6 @@ retry: return r; } -static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) -{ - struct amdgpu_cs_parser *p = param; - int r; - - r = amdgpu_cs_bo_validate(p, bo); - if (r) - return r; - - if (bo->shadow) - r = amdgpu_cs_bo_validate(p, bo->shadow); - - return r; -} - static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { @@ -492,7 +478,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, lobj->user_pages); } - r = amdgpu_cs_validate(p, bo); + r = amdgpu_cs_bo_validate(p, bo); if (r) return r; @@ -558,7 +544,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); if (!e->user_pages) { - DRM_ERROR("calloc failure\n"); + DRM_ERROR("kvmalloc_array failure\n"); return -ENOMEM; } @@ -586,13 +572,27 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto out; } + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + + e->bo_va = amdgpu_vm_bo_find(vm, bo); + + if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { + e->chain = dma_fence_chain_alloc(); + if (!e->chain) { + r = -ENOMEM; + goto error_validate; + } + } + } + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; p->bytes_moved_vis = 0; r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, - amdgpu_cs_validate, p); + amdgpu_cs_bo_validate, p); if (r) { DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); goto error_validate; @@ -613,15 +613,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, gws = p->bo_list->gws_obj; oa = p->bo_list->oa_obj; - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - - /* Make sure we use the exclusive slot for shared BOs */ - if (bo->prime_shared_count) - e->tv.num_shared = 0; - e->bo_va = amdgpu_vm_bo_find(vm, bo); - } - if (gds) { p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; @@ -643,24 +634,32 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) + if (r) { + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } ttm_eu_backoff_reservation(&p->ticket, &p->validated); + } out: return r; } static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) { + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_bo_list_entry *e; int r; list_for_each_entry(e, &p->validated, tv.head) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); struct dma_resv *resv = bo->tbo.base.resv; + enum amdgpu_sync_mode sync_mode; - r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, - amdgpu_bo_explicit_sync(bo)); - + sync_mode = amdgpu_bo_explicit_sync(bo) ? 
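Note why the dma_fence_chain nodes are allocated here in amdgpu_cs_parser_bos() rather than at the point of use: the submit path below installs them while adev->notifier_lock is held, and its own comment states that no memory allocation is allowed under that lock, so every shared, implicitly synced BO gets its node preallocated. Condensed from the hunk:

amdgpu_bo_list_for_each_entry(e, p->bo_list) {
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

	if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
		e->chain = dma_fence_chain_alloc();	/* consumed later under notifier_lock */
		if (!e->chain) {
			r = -ENOMEM;
			goto error_validate;
		}
	}
}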
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, + &fpriv->vm); if (r) return r; } @@ -668,11 +667,12 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) } /** - * cs_parser_fini() - clean parser states + * amdgpu_cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. * @error: error number + * @backoff: indicator to backoff the reservation * - * If error is set than unvalidate buffer, otherwise just free memory + * If error is set then unvalidate buffer, otherwise just free memory * used by parsing context. **/ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, @@ -680,9 +680,17 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) + if (error && backoff) { + struct amdgpu_bo_list_entry *e; + + amdgpu_bo_list_for_each_entry(e, parser->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } + ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); + } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -701,7 +709,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, for (i = 0; i < parser->nchunks; i++) kvfree(parser->chunks[i].kdata); - kfree(parser->chunks); + kvfree(parser->chunks); if (parser->job) amdgpu_job_free(parser->job); if (parser->uf_entry.tv.bo) { @@ -791,33 +799,27 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, - fpriv->prt_va->last_pt_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update); if (r) return r; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { - struct dma_fence *f; - bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_fence *f; - /* ignore duplicates */ bo = ttm_to_amdgpu_bo(e->tv.bo); if (!bo) @@ -827,12 +829,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } @@ -845,11 +846,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update); if (r) return r; - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ @@ -916,11 +917,17 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (parser->entity && parser->entity != entity) return -EINVAL; + /* Return if there is no run queue associated with this entity. 
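amdgpu_cs_sync_rings() used to pass a bare explicit-sync flag; it now picks an amdgpu_sync_mode and also hands the VM to amdgpu_sync_resv() so the sync code can compare fence owners. As the enum names suggest (the enum itself is defined elsewhere in the driver, presumably amdgpu_sync.h):

sync_mode = amdgpu_bo_explicit_sync(bo) ?
	AMDGPU_SYNC_EXPLICIT :	/* skip implicit fences; userspace syncs itself */
	AMDGPU_SYNC_NE_OWNER;	/* wait only on fences from other owners/VMs */
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, &fpriv->vm);

The practical effect is that submissions from the same VM no longer serialize on each other's implicit fences.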
+ * Possibly because of disabled HW IP*/ + if (entity->rq == NULL) + return -EINVAL; + parser->entity = entity; ring = to_amdgpu_ring(entity->rq->sched); r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? - chunk_ib->ib_bytes : 0, ib); + chunk_ib->ib_bytes : 0, + AMDGPU_IB_POOL_DELAYED, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -987,7 +994,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, dma_fence_put(old); } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence); dma_fence_put(fence); if (r) return r; @@ -1009,7 +1016,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, return r; } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence); dma_fence_put(fence); return r; @@ -1120,7 +1127,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->chain = NULL; if (syncobj_deps[i].point) { - dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); + dep->chain = dma_fence_chain_alloc(); if (!dep->chain) return -ENOMEM; } @@ -1128,7 +1135,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->syncobj = drm_syncobj_find(p->filp, syncobj_deps[i].handle); if (!dep->syncobj) { - kfree(dep->chain); + dma_fence_chain_free(dep->chain); return -EINVAL; } dep->point = syncobj_deps[i].point; @@ -1203,8 +1210,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct drm_sched_entity *entity = p->entity; - enum drm_sched_priority priority; - struct amdgpu_ring *ring; struct amdgpu_bo_list_entry *e; struct amdgpu_job *job; uint64_t seq; @@ -1213,10 +1218,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job = p->job; p->job = NULL; - r = drm_sched_job_init(&job->base, entity, p->filp); + r = drm_sched_job_init(&job->base, entity, &fpriv->vm); if (r) goto error_unlock; + drm_sched_job_arm(&job->base); + /* No memory allocation is allowed while holding the notifier lock. * The lock is held until amdgpu_cs_submit is finished and fence is * added to BOs. @@ -1236,7 +1243,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, goto error_abort; } - job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); @@ -1255,14 +1261,32 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, trace_amdgpu_cs_ioctl(job); amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); - priority = job->base.s_priority; - drm_sched_entity_push_job(&job->base, entity); - - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); + drm_sched_entity_push_job(&job->base); amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct dma_resv *resv = e->tv.bo->base.resv; + struct dma_fence_chain *chain = e->chain; + + if (!chain) + continue; + + /* + * Work around dma_resv shortcommings by wrapping up the + * submission in a dma_fence_chain and add it as exclusive + * fence, but first add the submission as shared fence to make + * sure that shared fences never signal before the exclusive + * one. 
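The submission path above adopts the split scheduler API: drm_sched_job_init() only binds the job to its entity, while the new drm_sched_job_arm() call is the point of no return that makes the job's scheduler fences usable (the very next lines take a reference on job->base.s_fence->finished). Once armed, a job must be handed to drm_sched_entity_push_job(), which now takes just the job, rather than freed. In outline:

r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r)
	goto error_unlock;

drm_sched_job_arm(&job->base);		/* point of no return: s_fence is live */
p->fence = dma_fence_get(&job->base.s_fence->finished);
/* ... publish fences to the BOs ... */
drm_sched_entity_push_job(&job->base);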
+ */ + dma_fence_chain_init(chain, dma_resv_excl_fence(resv), + dma_fence_get(p->fence), 1); + + dma_resv_add_shared_fence(resv, p->fence); + rcu_assign_pointer(resv->fence_excl, &chain->base); + e->chain = NULL; + } + ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); @@ -1277,13 +1301,24 @@ error_unlock: return r; } +static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser) +{ + int i; + + if (!trace_amdgpu_cs_enabled()) + return; + + for (i = 0; i < parser->job->num_ibs; i++) + trace_amdgpu_cs(parser, i); +} + int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_cs *cs = data; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; - int i, r; + int r; if (amdgpu_ras_intr_triggered()) return -EHWPOISON; @@ -1296,7 +1331,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(&parser, data); if (r) { - DRM_ERROR("Failed to initialize parser %d!\n", r); + if (printk_ratelimit()) + DRM_ERROR("Failed to initialize parser %d!\n", r); goto out; } @@ -1321,8 +1357,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) reserved_buffers = true; - for (i = 0; i < parser.job->num_ibs; i++) - trace_amdgpu_cs(&parser, i); + trace_amdgpu_cs_ibs(&parser); r = amdgpu_cs_vm_handling(&parser); if (r) @@ -1423,7 +1458,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_fence_to_handle *info = data; struct dma_fence *fence; struct drm_syncobj *syncobj; @@ -1452,7 +1487,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, dma_fence_put(fence); if (r) return r; - r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle); + r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle); drm_syncobj_put(syncobj); return r; @@ -1480,7 +1515,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, } /** - * amdgpu_cs_wait_all_fence - wait on all fences to signal + * amdgpu_cs_wait_all_fences - wait on all fences to signal * * @adev: amdgpu device * @filp: file private @@ -1599,7 +1634,7 @@ err_free_fence_array: int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_wait_fences *wait = data; uint32_t fence_count = wait->in.fence_count; struct drm_amdgpu_fence *fences_user; @@ -1631,11 +1666,12 @@ err_free_fences: } /** - * amdgpu_cs_find_bo_va - find bo_va for VM address + * amdgpu_cs_find_mapping - find bo_va for VM address * * @parser: command submission parser context * @addr: VM address * @bo: resulting BO of the mapping found + * @map: Placeholder to return found BO mapping * * Search the buffer objects in the command submission context for a certain * virtual memory address. 
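The workaround above compensates for dma_resv having only a single exclusive-fence slot. Ordering is the subtle part, and the hunk keeps it explicit: the submission is published as a shared fence before the chain node (old exclusive fence linked to the new one) is swapped into the exclusive slot, so a shared fence can never signal ahead of the exclusive fence wrapping the same submission. Reduced to its three steps:

dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
		     dma_fence_get(p->fence), 1);	/* link old excl -> new fence */
dma_resv_add_shared_fence(resv, p->fence);		/* publish as shared first */
rcu_assign_pointer(resv->fence_excl, &chain->base);	/* then install the chain */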
Returns allocation structure when found, NULL diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 08047bc4d588..da21e60bb827 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -37,10 +37,9 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, u32 domain, uint32_t size) { - int r; void *ptr; - r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, domain, bo, NULL, &ptr); if (!*bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6614d8a6f4c8..468003583b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_sched.h" #include "amdgpu_ras.h" +#include <linux/nospec.h> #define to_amdgpu_ctx_entity(e) \ container_of((e), struct amdgpu_ctx_entity, entity) @@ -42,21 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; -static int amdgpu_ctx_total_num_entities(void) +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio) { - unsigned i, num_entities = 0; + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + case AMDGPU_CTX_PRIORITY_VERY_LOW: + case AMDGPU_CTX_PRIORITY_LOW: + case AMDGPU_CTX_PRIORITY_NORMAL: + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return true; + default: + return false; + } +} + +static enum drm_sched_priority +amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) +{ + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + return DRM_SCHED_PRIORITY_UNSET; + + case AMDGPU_CTX_PRIORITY_VERY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_NORMAL: + return DRM_SCHED_PRIORITY_NORMAL; + + case AMDGPU_CTX_PRIORITY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; + + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) - num_entities += amdgpu_ctx_num_entities[i]; + /* This should not happen as we sanitized userspace provided priority + * already, WARN if this happens. 
+ */ + default: + WARN(1, "Invalid context priority %d\n", ctx_prio); + return DRM_SCHED_PRIORITY_NORMAL; + } - return num_entities; } static int amdgpu_ctx_priority_permit(struct drm_file *filp, - enum drm_sched_priority priority) + int32_t priority) { + if (!amdgpu_ctx_priority_is_valid(priority)) + return -EINVAL; + /* NORMAL and below are accessible by everyone */ - if (priority <= DRM_SCHED_PRIORITY_NORMAL) + if (priority <= AMDGPU_CTX_PRIORITY_NORMAL) return 0; if (capable(CAP_SYS_NICE)) @@ -68,47 +109,124 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } +static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio) +{ + switch (prio) { + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return AMDGPU_GFX_PIPE_PRIO_HIGH; + default: + return AMDGPU_GFX_PIPE_PRIO_NORMAL; + } +} + +static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio) +{ + switch (prio) { + case AMDGPU_CTX_PRIORITY_HIGH: + return AMDGPU_RING_PRIO_1; + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} + +static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip) +{ + struct amdgpu_device *adev = ctx->adev; + int32_t ctx_prio; + unsigned int hw_prio; + + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; + + switch (hw_ip) { + case AMDGPU_HW_IP_COMPUTE: + hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio); + break; + case AMDGPU_HW_IP_VCE: + case AMDGPU_HW_IP_VCN_ENC: + hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio); + break; + default: + hw_prio = AMDGPU_RING_PRIO_DEFAULT; + break; + } + + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); + if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0) + hw_prio = AMDGPU_RING_PRIO_DEFAULT; + + return hw_prio; +} + + +static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, + const u32 ring) +{ + struct amdgpu_device *adev = ctx->adev; + struct amdgpu_ctx_entity *entity; + struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; + unsigned num_scheds = 0; + int32_t ctx_prio; + unsigned int hw_prio; + enum drm_sched_priority drm_prio; + int r; + + entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), + GFP_KERNEL); + if (!entity) + return -ENOMEM; + + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? 
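Two details in the new priority plumbing are easy to miss. First, LOW/VERY_LOW and HIGH/VERY_HIGH deliberately collapse onto the same drm_sched level; the finer distinction survives only as a hardware ring priority (AMDGPU_RING_PRIO_1 vs. _2 in the VCE/VCN mapping above). Second, array_index_nospec() is Spectre hardening: hw_ip has already been range-checked, but the barrier clamps it under speculative execution as well before it indexes adev->gpu_sched:

#include <linux/nospec.h>

hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;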
+ ctx->init_priority : ctx->override_priority; + entity->sequence = 1; + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); + drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio); + + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); + scheds = adev->gpu_sched[hw_ip][hw_prio].sched; + num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; + + /* disable load balance if the hw engine retains context among dependent jobs */ + if (hw_ip == AMDGPU_HW_IP_VCN_ENC || + hw_ip == AMDGPU_HW_IP_VCN_DEC || + hw_ip == AMDGPU_HW_IP_UVD_ENC || + hw_ip == AMDGPU_HW_IP_UVD) { + sched = drm_sched_pick_best(scheds, num_scheds); + scheds = &sched; + num_scheds = 1; + } + + r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds, + &ctx->guilty); + if (r) + goto error_free_entity; + + ctx->entities[hw_ip][ring] = entity; + return 0; + +error_free_entity: + kfree(entity); + + return r; +} + static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum drm_sched_priority priority, + int32_t priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); - unsigned i, j, k; int r; - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) - return -EINVAL; - r = amdgpu_ctx_priority_permit(filp, priority); if (r) return r; memset(ctx, 0, sizeof(*ctx)); - ctx->adev = adev; - ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, - sizeof(struct dma_fence*), GFP_KERNEL); - if (!ctx->fences) - return -ENOMEM; - - ctx->entities[0] = kcalloc(num_entities, - sizeof(struct amdgpu_ctx_entity), - GFP_KERNEL); - if (!ctx->entities[0]) { - r = -ENOMEM; - goto error_free_fences; - } - - for (i = 0; i < num_entities; ++i) { - struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; - - entity->sequence = 1; - entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; - } - for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) - ctx->entities[i] = ctx->entities[i - 1] + - amdgpu_ctx_num_entities[i - 1]; + ctx->adev = adev; kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); @@ -118,116 +236,50 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->reset_counter_query = ctx->reset_counter; ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; + ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET; - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { - struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; - struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; - unsigned num_rings = 0; - unsigned num_rqs = 0; - - switch (i) { - case AMDGPU_HW_IP_GFX: - rings[0] = &adev->gfx.gfx_ring[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_COMPUTE: - for (j = 0; j < adev->gfx.num_compute_rings; ++j) - rings[j] = &adev->gfx.compute_ring[j]; - num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - for (j = 0; j < adev->sdma.num_instances; ++j) - rings[j] = &adev->sdma.instance[j].ring; - num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - rings[0] = &adev->uvd.inst[0].ring; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCE: - rings[0] = &adev->vce.ring[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_UVD_ENC: - rings[0] = &adev->uvd.inst[0].ring_enc[0]; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_DEC: - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { - if (adev->vcn.harvest_config & (1 << j)) - continue; - rings[num_rings++] = &adev->vcn.inst[j].ring_dec; - } - break; - case AMDGPU_HW_IP_VCN_ENC: - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { - if 
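amdgpu_ctx_init_entity() allocates the entity and its per-job fence ring with a single kzalloc(). That relies on the amdgpu_ctx.h hunk further down in this diff, which turns the fences pointer into a flexible array member; struct_size() then computes the combined allocation size with overflow checking:

struct amdgpu_ctx_entity {
	uint64_t sequence;
	struct drm_sched_entity entity;
	struct dma_fence *fences[];	/* amdgpu_sched_jobs entries, inline */
};

entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), GFP_KERNEL);

Together with the lazy creation in amdgpu_ctx_get_entity(), entities (and their fence rings) now exist only for rings a context actually touches.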
(adev->vcn.harvest_config & (1 << j)) - continue; - for (k = 0; k < adev->vcn.num_enc_rings; ++k) - rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k]; - } - break; - case AMDGPU_HW_IP_VCN_JPEG: - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { - if (adev->vcn.harvest_config & (1 << j)) - continue; - rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg; - } - break; - } - - for (j = 0; j < num_rings; ++j) { - if (!rings[j]->adev) - continue; + return 0; +} - rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority]; - } +static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) +{ - for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) - r = drm_sched_entity_init(&ctx->entities[i][j].entity, - rqs, num_rqs, &ctx->guilty); - if (r) - goto error_cleanup_entities; - } + int i; - return 0; + if (!entity) + return; -error_cleanup_entities: - for (i = 0; i < num_entities; ++i) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); - kfree(ctx->entities[0]); + for (i = 0; i < amdgpu_sched_jobs; ++i) + dma_fence_put(entity->fences[i]); -error_free_fences: - kfree(ctx->fences); - ctx->fences = NULL; - return r; + kfree(entity); } static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < num_entities; ++i) - for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->entities[0][i].fences[j]); - kfree(ctx->fences); - kfree(ctx->entities[0]); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { + amdgpu_ctx_fini_entity(ctx->entities[i][j]); + ctx->entities[i][j] = NULL; + } + } mutex_destroy(&ctx->lock); - kfree(ctx); } int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { + int r; + if (hw_ip >= AMDGPU_HW_IP_NUM) { DRM_ERROR("unknown HW IP type: %d\n", hw_ip); return -EINVAL; @@ -244,14 +296,20 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - *entity = &ctx->entities[hw_ip][ring].entity; + if (ctx->entities[hw_ip][ring] == NULL) { + r = amdgpu_ctx_init_entity(ctx, hw_ip, ring); + if (r) + return r; + } + + *entity = &ctx->entities[hw_ip][ring]->entity; return 0; } static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, - enum drm_sched_priority priority, + int32_t priority, uint32_t *id) { struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; @@ -284,14 +342,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; - unsigned num_entities; - u32 i; + u32 i, j; ctx = container_of(ref, struct amdgpu_ctx, refcount); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + if (!ctx->entities[i][j]) + continue; - num_entities = amdgpu_ctx_total_num_entities(); - for (i = 0; i < num_entities; i++) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); + drm_sched_entity_destroy(&ctx->entities[i][j]->entity); + } + } amdgpu_ctx_fini(ref); } @@ -345,13 +406,15 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, return 0; } +#define AMDGPU_RAS_COUNTE_DELAY_MS 3000 + static int amdgpu_ctx_query2(struct amdgpu_device *adev, - struct amdgpu_fpriv *fpriv, uint32_t id, - union drm_amdgpu_ctx_out *out) + struct amdgpu_fpriv *fpriv, uint32_t id, + union drm_amdgpu_ctx_out *out) { + 
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr; - unsigned long ras_counter; if (!fpriv) return -EINVAL; @@ -376,19 +439,28 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, if (atomic_read(&ctx->guilty)) out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; - /*query ue count*/ - ras_counter = amdgpu_ras_query_error_count(adev, false); - /*ras counter is monotonic increasing*/ - if (ras_counter != ctx->ras_counter_ue) { - out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; - ctx->ras_counter_ue = ras_counter; - } + if (adev->ras_enabled && con) { + /* Return the cached values in O(1), + * and schedule delayed work to cache + * new vaues. + */ + int ce_count, ue_count; + + ce_count = atomic_read(&con->ras_ce_count); + ue_count = atomic_read(&con->ras_ue_count); + + if (ce_count != ctx->ras_counter_ce) { + ctx->ras_counter_ce = ce_count; + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; + } + + if (ue_count != ctx->ras_counter_ue) { + ctx->ras_counter_ue = ue_count; + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; + } - /*query ce count*/ - ras_counter = amdgpu_ras_query_error_count(adev, true); - if (ras_counter != ctx->ras_counter_ce) { - out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; - ctx->ras_counter_ce = ras_counter; + schedule_delayed_work(&con->ras_counte_delay_work, + msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS)); } mutex_unlock(&mgr->lock); @@ -400,20 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, { int r; uint32_t id; - enum drm_sched_priority priority; + int32_t priority; union drm_amdgpu_ctx *args = data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; - r = 0; id = args->in.ctx_id; - priority = amdgpu_to_sched_priority(args->in.priority); + priority = args->in.priority; /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (priority == DRM_SCHED_PRIORITY_INVALID) - priority = DRM_SCHED_PRIORITY_NORMAL; + if (!amdgpu_ctx_priority_is_valid(priority)) + priority = AMDGPU_CTX_PRIORITY_NORMAL; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: @@ -465,7 +536,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, - struct dma_fence *fence, uint64_t* handle) + struct dma_fence *fence, uint64_t *handle) { struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); uint64_t seq = centity->sequence; @@ -518,22 +589,49 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, return fence; } +static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, + struct amdgpu_ctx_entity *aentity, + int hw_ip, + int32_t priority) +{ + struct amdgpu_device *adev = ctx->adev; + unsigned int hw_prio; + struct drm_gpu_scheduler **scheds = NULL; + unsigned num_scheds; + + /* set sw priority */ + drm_sched_entity_set_priority(&aentity->entity, + amdgpu_ctx_to_drm_sched_prio(priority)); + + /* set hw priority */ + if (hw_ip == AMDGPU_HW_IP_COMPUTE) { + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); + hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX); + scheds = adev->gpu_sched[hw_ip][hw_prio].sched; + num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; + drm_sched_entity_modify_sched(&aentity->entity, scheds, + num_scheds); + } +} + void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority) + int32_t 
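amdgpu_ctx_query2() previously queried the RAS error counters synchronously on every ioctl. The rewrite returns values cached in the amdgpu_ras context and merely schedules delayed work to refresh them, keeping the ioctl O(1); the shape of the pattern:

ce_count = atomic_read(&con->ras_ce_count);	/* cheap cached read */
ue_count = atomic_read(&con->ras_ue_count);
/* ... diff against the per-context counters, set the QUERY2 flags ... */
schedule_delayed_work(&con->ras_counte_delay_work,
		      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));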
priority) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); - enum drm_sched_priority ctx_prio; - unsigned i; + int32_t ctx_prio; + unsigned i, j; ctx->override_priority = priority; - ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + if (!ctx->entities[i][j]) + continue; - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity = &ctx->entities[0][i].entity; - - drm_sched_entity_set_priority(entity, ctx_prio); + amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j], + i, ctx_prio); + } } } @@ -569,20 +667,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; mutex_lock(&mgr->lock); idr_for_each_entry(idp, ctx, id) { - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; - entity = &ctx->entities[0][i].entity; - timeout = drm_sched_entity_flush(entity, timeout); + entity = &ctx->entities[i][j]->entity; + timeout = drm_sched_entity_flush(entity, timeout); + } } } mutex_unlock(&mgr->lock); @@ -591,10 +693,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; @@ -604,8 +705,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) - drm_sched_entity_fini(&ctx->entities[0][i].entity); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; + + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_fini(entity); + } + } } } @@ -627,3 +737,81 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) idr_destroy(&mgr->ctx_handles); mutex_destroy(&mgr->lock); } + +static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx, + struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max) +{ + ktime_t now, t1; + uint32_t i; + + *total = *max = 0; + + now = ktime_get(); + for (i = 0; i < amdgpu_sched_jobs; i++) { + struct dma_fence *fence; + struct drm_sched_fence *s_fence; + + spin_lock(&ctx->ring_lock); + fence = dma_fence_get(centity->fences[i]); + spin_unlock(&ctx->ring_lock); + if (!fence) + continue; + s_fence = to_drm_sched_fence(fence); + if (!dma_fence_is_signaled(&s_fence->scheduled)) { + dma_fence_put(fence); + continue; + } + t1 = s_fence->scheduled.timestamp; + if (!ktime_before(t1, now)) { + dma_fence_put(fence); + continue; + } + if (dma_fence_is_signaled(&s_fence->finished) && + s_fence->finished.timestamp < now) + *total += ktime_sub(s_fence->finished.timestamp, t1); + else + *total += ktime_sub(now, t1); + t1 = ktime_sub(now, t1); + dma_fence_put(fence); + *max = max(t1, *max); + } +} + +ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip, + uint32_t idx, 
uint64_t *elapsed) +{ + struct idr *idp; + struct amdgpu_ctx *ctx; + uint32_t id; + struct amdgpu_ctx_entity *centity; + ktime_t total = 0, max = 0; + + if (idx >= AMDGPU_MAX_ENTITY_NUM) + return 0; + idp = &mgr->ctx_handles; + mutex_lock(&mgr->lock); + idr_for_each_entry(idp, ctx, id) { + ktime_t ttotal, tmax; + + if (!ctx->entities[hwip][idx]) + continue; + + centity = ctx->entities[hwip][idx]; + amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax); + + /* Harmonic mean approximation diverges for very small + * values. If ratio < 0.01% ignore + */ + if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal)) + continue; + + total = ktime_add(total, ttotal); + max = ktime_after(tmax, max) ? tmax : max; + } + + mutex_unlock(&mgr->lock); + if (elapsed) + *elapsed = max; + + return total; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index da808633732b..a44b8b8ed39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,10 +29,13 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; +#define AMDGPU_MAX_ENTITY_NUM 4 +#define AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max, total) ((max) > 16384ULL*(total)) + struct amdgpu_ctx_entity { uint64_t sequence; - struct dma_fence **fences; struct drm_sched_entity entity; + struct dma_fence *fences[]; }; struct amdgpu_ctx { @@ -42,11 +45,10 @@ struct amdgpu_ctx { unsigned reset_counter_query; uint32_t vram_lost_counter; spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; + int32_t init_priority; + int32_t override_priority; struct mutex lock; atomic_t guilty; unsigned long ras_counter_ce; @@ -73,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq); -void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority); +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio); +void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -86,5 +88,6 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); - +ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip, + uint32_t idx, uint64_t *elapsed); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 8e6726e0d035..164d6a9e9fbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -26,49 +26,16 @@ #include <linux/kthread.h> #include <linux/pci.h> #include <linux/uaccess.h> - -#include <drm/drm_debugfs.h> +#include <linux/pm_runtime.h> #include "amdgpu.h" - -/** - * amdgpu_debugfs_add_files - Add simple debugfs entries - * - * @adev: Device to attach debugfs entries to - * @files: Array of function callbacks that respond to reads - * @nfiles: Number of callbacks to register - * - */ -int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - const struct drm_info_list *files, - unsigned nfiles) -{ - unsigned i; - - 
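amdgpu_ctx_fence_time() derives per-entity GPU time purely from scheduler fence timestamps: scheduled marks when a job reached the hardware, finished when it completed, and a job still in flight is credited up to the current time. Per fence slot the accounting reduces to:

t1 = s_fence->scheduled.timestamp;
if (dma_fence_is_signaled(&s_fence->finished) &&
    s_fence->finished.timestamp < now)
	*total += ktime_sub(s_fence->finished.timestamp, t1);
else
	*total += ktime_sub(now, t1);	/* still running: count elapsed time */

amdgpu_ctx_mgr_fence_usage() then sums this across all contexts for one hw ip/ring and reports the longest single interval via *elapsed.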
for (i = 0; i < adev->debugfs_count; i++) { - if (adev->debugfs[i].files == files) { - /* Already registered */ - return 0; - } - } - - i = adev->debugfs_count + 1; - if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { - DRM_ERROR("Reached maximum number of debugfs components.\n"); - DRM_ERROR("Report so we increase " - "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); - return -EINVAL; - } - adev->debugfs[adev->debugfs_count].files = files; - adev->debugfs[adev->debugfs_count].num_files = nfiles; - adev->debugfs_count = i; -#if defined(CONFIG_DEBUG_FS) - drm_debugfs_create_files(files, nfiles, - adev->ddev->primary->debugfs_root, - adev->ddev->primary); -#endif - return 0; -} +#include "amdgpu_pm.h" +#include "amdgpu_dm_debugfs.h" +#include "amdgpu_ras.h" +#include "amdgpu_rap.h" +#include "amdgpu_securedisplay.h" +#include "amdgpu_fw_attestation.h" +#include "amdgpu_umr.h" #if defined(CONFIG_DEBUG_FS) @@ -86,14 +53,14 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, * * Bit 62: Indicates a GRBM bank switch is needed * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is - * zero) + * zero) * Bits 24..33: The SE or ME selector if needed * Bits 34..43: The SH (or SA) or PIPE selector if needed * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed * * Bit 23: Indicates that the PM power gating lock should be held - * This is necessary to read registers that might be - * unreliable during a power gating transistion. + * This is necessary to read registers that might be + * unreliable during a power gating transistion. * * The lower bits are the BYTE offset of the register to read. This * allows reading multiple registers in a single call and having @@ -129,7 +96,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, sh_bank = 0xFFFFFFFF; if (instance_bank == 0x3FF) instance_bank = 0xFFFFFFFF; - use_bank = 1; + use_bank = true; } else if (*pos & (1ULL << 61)) { me = (*pos & GENMASK_ULL(33, 24)) >> 24; @@ -137,17 +104,33 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, queue = (*pos & GENMASK_ULL(53, 44)) >> 44; vmid = (*pos & GENMASK_ULL(58, 54)) >> 54; - use_ring = 1; + use_ring = true; } else { - use_bank = use_ring = 0; + use_bank = use_ring = false; } *pos &= (1UL << 22) - 1; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || - (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) + (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, sh_bank, instance_bank); @@ -168,7 +151,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } else { r = get_user(value, (uint32_t *)buf); if (!r) - WREG32(*pos >> 2, value); + amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value); } if (r) { result = r; @@ -193,10 +176,14 @@ end: if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } -/** 
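The comment block above documents how the legacy register file multiplexes bank selection into the high bits of the file offset. Reading a banked register from userspace therefore means synthesizing a 64-bit offset and pread()ing at it; an illustrative composition following the documented layout (field values are placeholders):

#include <stdint.h>
#include <unistd.h>

uint64_t pos = (1ULL << 62)		/* bit 62: request a GRBM bank switch */
	| ((uint64_t)se << 24)		/* bits 24..33: SE selector */
	| ((uint64_t)sh << 34)		/* bits 34..43: SH (SA) selector */
	| ((uint64_t)instance << 44)	/* bits 44..53: instance/CU selector */
	| reg_byte_offset;		/* low 22 bits: register byte offset */

pread(fd, &value, 4, pos);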
+/* * amdgpu_debugfs_regs_read - Callback for reading MMIO registers */ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, @@ -205,7 +192,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos); } -/** +/* * amdgpu_debugfs_regs_write - Callback for writing MMIO registers */ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, @@ -214,6 +201,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); } +static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd; + + rd = kzalloc(sizeof *rd, GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd = file->private_data; + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t value; + + if (size & 0x3 || offset & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + mutex_lock(&rd->lock); + + if (rd->id.use_grbm) { + if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || + (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + mutex_unlock(&rd->lock); + return -EINVAL; + } + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, + rd->id.grbm.sh, + rd->id.grbm.instance); + } + + if (rd->id.use_srbm) { + mutex_lock(&adev->srbm_mutex); + amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, + rd->id.srbm.queue, rd->id.srbm.vmid); + } + + if (rd->id.pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + if (!write_en) { + value = RREG32(offset >> 2); + r = put_user(value, (uint32_t *)buf); + } else { + r = get_user(value, (uint32_t *)buf); + if (!r) + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); + } + if (r) { + result = r; + goto end; + } + offset += 4; + size -= 4; + result += 4; + buf += 4; + } +end: + if (rd->id.use_grbm) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (rd->id.use_srbm) { + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } + + if (rd->id.pg_lock) + mutex_unlock(&adev->pm.mutex); + + mutex_unlock(&rd->lock); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); + return result; +} + +static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_regs2_data *rd = 
f->private_data; + int r; + + switch (cmd) { + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + mutex_lock(&rd->lock); + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id); + mutex_unlock(&rd->lock); + return r ? -EINVAL : 0; + default: + return -EINVAL; + } + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); +} + +static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); +} + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -237,13 +363,29 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; - value = RREG32_PCIE(*pos >> 2); + value = RREG32_PCIE(*pos); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -251,6 +393,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -276,14 +422,30 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } - WREG32_PCIE(*pos >> 2, value); + WREG32_PCIE(*pos, value); result += 4; buf += 4; @@ -291,6 +453,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -316,13 +482,29 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; value = RREG32_DIDT(*pos >> 2); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -330,6 
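The new regs2 file drops the bit-packed offsets in favor of an explicit ioctl: userspace sets the GRBM/SRBM selection state once with AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, then reads and writes registers at plain byte offsets. A hedged userspace sketch; the iocdata field names are inferred from the handler above, and the real layout lives in amdgpu_umr.h:

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct amdgpu_debugfs_regs2_iocdata id = { 0 };

id.use_grbm = 1;
id.grbm.se = se;
id.grbm.sh = sh;
id.grbm.instance = instance;

ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id);
pread(fd, &value, 4, reg_byte_offset);	/* offset is now just the register address */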
+512,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -355,12 +541,28 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } WREG32_DIDT(*pos >> 2, value); @@ -370,6 +572,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -395,13 +601,29 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; value = RREG32_SMC(*pos); r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -409,6 +631,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -434,12 +660,28 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (size & 0x3 || *pos & 0x3) return -EINVAL; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + while (size) { uint32_t value; r = get_user(value, (uint32_t *)buf); - if (r) + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); return r; + } WREG32_SMC(*pos, value); @@ -449,6 +691,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * size -= 4; } + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -572,12 +818,33 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, idx = *pos >> 2; valuesize = sizeof(values); + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = 
amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - if (r) + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } - if (size > valuesize) + if (size > valuesize) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } outsize = 0; x = 0; @@ -590,6 +857,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, } } + amdgpu_virt_disable_access_debugfs(adev); return !r ? outsize : r; } @@ -619,7 +887,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, { struct amdgpu_device *adev = f->f_inode->i_private; int r, x; - ssize_t result=0; + ssize_t result = 0; uint32_t offset, se, sh, cu, wave, simd, data[32]; if (size & 3 || *pos & 3) @@ -633,6 +901,18 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, wave = (*pos & GENMASK_ULL(36, 31)) >> 31; simd = (*pos & GENMASK_ULL(44, 37)) >> 37; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -644,16 +924,23 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); - if (!x) + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (!x) { + amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; + } while (size && (offset < x * 4)) { uint32_t value; value = data[offset >> 2]; r = put_user(value, (uint32_t *)buf); - if (r) + if (r) { + amdgpu_virt_disable_access_debugfs(adev); return r; + } result += 4; buf += 4; @@ -661,6 +948,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, size -= 4; } + amdgpu_virt_disable_access_debugfs(adev); return result; } @@ -694,11 +982,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, ssize_t result = 0; uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; - if (size & 3 || *pos & 3) + if (size > 4096 || size & 3 || *pos & 3) return -EINVAL; /* decode offset */ - offset = *pos & GENMASK_ULL(11, 0); + offset = (*pos & GENMASK_ULL(11, 0)) >> 2; se = (*pos & GENMASK_ULL(19, 12)) >> 12; sh = (*pos & GENMASK_ULL(27, 20)) >> 20; cu = (*pos & GENMASK_ULL(35, 28)) >> 28; @@ -711,6 +999,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (!data) return -ENOMEM; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) + goto err; + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) + goto err; + /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se, sh, cu); @@ -726,13 +1022,16 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + while (size) { uint32_t value; - value = 
data[offset++]; + value = data[result >> 2]; r = put_user(value, (uint32_t *)buf); if (r) { - result = r; + amdgpu_virt_disable_access_debugfs(adev); goto err; } @@ -741,11 +1040,128 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, size -= 4; } + kfree(data); + amdgpu_virt_disable_access_debugfs(adev); + return result; + err: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); kfree(data); + return r; +} + +/** + * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF + * + * @f: open file handle + * @buf: User buffer to write data from + * @size: Number of bytes to write + * @pos: Offset to seek to + * + * Write a 32-bit zero to disable or a 32-bit non-zero to enable + */ +static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + amdgpu_gfx_off_ctrl(adev, value ? true : false); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + return result; +} + + +/** + * amdgpu_debugfs_gfxoff_read - read gfxoff status + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + */ +static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) + return r; + + while (size) { + uint32_t value; + + r = amdgpu_get_gfx_off_status(adev, &value); + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = put_user(value, (uint32_t *)buf); + if (r) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return result; } +static const struct file_operations amdgpu_debugfs_regs2_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, + .read = amdgpu_debugfs_regs2_read, + .write = amdgpu_debugfs_regs2_write, + .open = amdgpu_debugfs_regs2_open, + .release = amdgpu_debugfs_regs2_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -794,8 +1210,16 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gfxoff_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gfxoff_read, + .write = amdgpu_debugfs_gfxoff_write, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs2_fops, 
&amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -803,10 +1227,12 @@ static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_sensors_fops, &amdgpu_debugfs_wave_fops, &amdgpu_debugfs_gpr_fops, + &amdgpu_debugfs_gfxoff_fops, }; static const char *debugfs_regs_names[] = { "amdgpu_regs", + "amdgpu_regs2", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", @@ -814,17 +1240,18 @@ static const char *debugfs_regs_names[] = { "amdgpu_sensors", "amdgpu_wave", "amdgpu_gpr", + "amdgpu_gfxoff", }; /** * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide - * register access. + * register access. * * @adev: The device to attach the debugfs entries to */ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *ent, *root = minor->debugfs_root; unsigned int i; @@ -834,33 +1261,27 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); - adev->debugfs_regs[i] = ent; } return 0; } -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) +static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) { - unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct drm_device *dev = adev_to_drm(adev); + int r = 0, i; - for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { - if (adev->debugfs_regs[i]) { - debugfs_remove(adev->debugfs_regs[i]); - adev->debugfs_regs[i] = NULL; - } + r = pm_runtime_get_sync(dev->dev); + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); + return r; } -} - -static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int r = 0, i; /* Avoid accidently unparking the sched thread during GPU reset */ - mutex_lock(&adev->lock_reset); + r = down_write_killable(&adev->reset_sem); + if (r) + return r; /* hold on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { @@ -887,47 +1308,91 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) kthread_unpark(ring->sched.thread); } - mutex_unlock(&adev->lock_reset); + up_write(&adev->reset_sem); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); return 0; } -static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) +static int amdgpu_debugfs_evict_vram(void *data, u64 *val) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)data; + struct drm_device *dev = adev_to_drm(adev); + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); + return r; + } + + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); - seq_write(m, adev->bios, adev->bios_size); return 0; } -static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) + +static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev 
= (struct amdgpu_device *)data; + struct drm_device *dev = adev_to_drm(adev); + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); + return r; + } + + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); - seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); return 0; } -static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data) + +static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct drm_device *dev = adev_to_drm(adev); + struct drm_file *file; + int r; - seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT)); - return 0; + r = mutex_lock_interruptible(&dev->filelist_mutex); + if (r) + return r; + + list_for_each_entry(file, &dev->filelist, lhead) { + struct amdgpu_fpriv *fpriv = file->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; + + seq_printf(m, "pid:%d\tProcess:%s ----------\n", + vm->task_info.pid, vm->task_info.process_name); + r = amdgpu_bo_reserve(vm->root.bo, true); + if (r) + break; + amdgpu_debugfs_vm_bo_info(vm, m); + amdgpu_bo_unreserve(vm->root.bo); + } + + mutex_unlock(&dev->filelist_mutex); + + return r; } -static const struct drm_info_list amdgpu_debugfs_list[] = { - {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, - {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, - {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}, - {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib); +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info); +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram, + NULL, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt, + NULL, "%lld\n"); static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring, struct dma_fence **fences) @@ -980,7 +1445,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) struct dma_fence *fence; spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry(s_job, &sched->pending_list, list) { fence = sched->ops->run_job(s_job); dma_fence_put(fence); } @@ -990,27 +1455,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) { struct amdgpu_job *job; - struct drm_sched_job *s_job; + struct drm_sched_job *s_job, *tmp; uint32_t preempt_seq; struct dma_fence *fence, **ptr; struct amdgpu_fence_driver *drv = &ring->fence_drv; struct drm_gpu_scheduler *sched = &ring->sched; + bool preempted = true; if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) return; preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); - if (preempt_seq <= atomic_read(&drv->last_seq)) - return; + if (preempt_seq <= atomic_read(&drv->last_seq)) { + preempted = false; + goto no_preempt; + } preempt_seq &= drv->num_fences_mask; ptr = &drv->fences[preempt_seq]; fence = rcu_dereference_protected(*ptr, 1); +no_preempt: spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { + /* remove job from ring_mirror_list */ + 
list_del_init(&s_job->list); + sched->ops->free_job(s_job); + continue; + } job = to_amdgpu_job(s_job); - if (job->fence == fence) + if (preempted && (&job->hw_fence) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } @@ -1042,7 +1517,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) return -ENOMEM; /* Avoid accidentally unparking the sched thread during GPU reset */ - mutex_lock(&adev->lock_reset); + r = down_read_killable(&adev->reset_sem); + if (r) + goto pro_end; /* stop the scheduler */ kthread_park(ring->sched.thread); @@ -1083,36 +1560,130 @@ failure: /* restart the scheduler */ kthread_unpark(ring->sched.thread); - mutex_unlock(&adev->lock_reset); + up_read(&adev->reset_sem); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); +pro_end: kfree(fences); + return r; +} + +static int amdgpu_debugfs_sclk_set(void *data, u64 val) +{ + int ret = 0; + uint32_t max_freq, min_freq; + struct amdgpu_device *adev = (struct amdgpu_device *)data; + + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return ret; + } + + if (is_support_sw_smu(adev)) { + ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq); + if (ret || val > max_freq || val < min_freq) + return -EINVAL; + ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val); + } else { + return 0; + } + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (ret) + return -EINVAL; + return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL, +DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, amdgpu_debugfs_ib_preempt, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL, + amdgpu_debugfs_sclk_set, "%llu\n"); + int amdgpu_debugfs_init(struct amdgpu_device *adev) { - adev->debugfs_preempt = - debugfs_create_file("amdgpu_preempt_ib", 0600, - adev->ddev->primary->debugfs_root, adev, - &fops_ib_preempt); - if (!(adev->debugfs_preempt)) { + struct dentry *root = adev_to_drm(adev)->primary->debugfs_root; + struct dentry *ent; + int r, i; + + if (!debugfs_initialized()) + return 0; + + ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, + &fops_ib_preempt); + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n"); - return -EIO; + return PTR_ERR(ent); } - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, - ARRAY_SIZE(amdgpu_debugfs_list)); -} + ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, + &fops_sclk_set); + if (IS_ERR(ent)) { + DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n"); + return PTR_ERR(ent); + } -void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) -{ - debugfs_remove(adev->debugfs_preempt); + /* Register debugfs entries for amdgpu_ttm */ + amdgpu_ttm_debugfs_init(adev); + amdgpu_debugfs_pm_init(adev); + amdgpu_debugfs_sa_init(adev); + amdgpu_debugfs_fence_init(adev); + amdgpu_debugfs_gem_init(adev); + + r = amdgpu_debugfs_regs_init(adev); + if (r) + DRM_ERROR("registering register debugfs failed (%d).\n", r); + + amdgpu_debugfs_firmware_init(adev); + +#if defined(CONFIG_DRM_AMD_DC) + if (amdgpu_device_has_dc_support(adev)) + dtn_debugfs_init(adev); +#endif + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring) + continue; + + amdgpu_debugfs_ring_init(adev, ring); + } + +
amdgpu_ras_debugfs_create_all(adev); + amdgpu_rap_debugfs_init(adev); + amdgpu_securedisplay_debugfs_init(adev); + amdgpu_fw_attestation_debugfs_init(adev); + + debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, + &amdgpu_evict_vram_fops); + debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, + &amdgpu_evict_gtt_fops); + debugfs_create_file("amdgpu_test_ib", 0444, root, adev, + &amdgpu_debugfs_test_ib_fops); + debugfs_create_file("amdgpu_vm_info", 0444, root, adev, + &amdgpu_debugfs_vm_info_fops); + + adev->debugfs_vbios_blob.data = adev->bios; + adev->debugfs_vbios_blob.size = adev->bios_size; + debugfs_create_blob("amdgpu_vbios", 0444, root, + &adev->debugfs_vbios_blob); + + adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; + adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->debugfs_discovery_blob); + + return 0; } #else @@ -1120,10 +1691,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { } int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index f289d28ad6b2..371a6f0deb29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -22,22 +22,13 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ - /* * Debugfs */ -struct amdgpu_debugfs { - const struct drm_info_list *files; - unsigned num_files; -}; int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev); -void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev); -int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - const struct drm_info_list *files, - unsigned nfiles); -int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); -int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); -int amdgpu_debugfs_gem_init(struct amdgpu_device *adev); +void amdgpu_debugfs_fini(struct amdgpu_device *adev); +void amdgpu_debugfs_fence_init(struct amdgpu_device *adev); +void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); +void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c17505fba988..6e40cc1bc6dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -64,8 +64,14 @@ #include "amdgpu_xgmi.h" #include "amdgpu_ras.h" #include "amdgpu_pmu.h" +#include "amdgpu_fru_eeprom.h" +#include "amdgpu_reset.h" #include <linux/suspend.h> +#include <drm/task_barrier.h> +#include <linux/pm_runtime.h> + +#include <drm/drm_drv.h> MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); @@ -77,6 +83,8 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -106,9 +114,18 @@ const char *amdgpu_asic_name[] = { "RAVEN", "ARCTURUS", "RENOIR", + "ALDEBARAN", "NAVI10", + "CYAN_SKILLFISH", "NAVI14", "NAVI12", + "SIENNA_CICHLID", + 
"NAVY_FLOUNDER", + "VANGOGH", + "DIMGREY_CAVEFISH", + "BEIGE_GOBY", + "YELLOW_CARP", + "IP DISCOVERY", "LAST", }; @@ -125,10 +142,10 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); - return snprintf(buf, PAGE_SIZE, "%llu\n", cnt); + return sysfs_emit(buf, "%llu\n", cnt); } static DEVICE_ATTR(pcie_replay_count, S_IRUGO, @@ -137,26 +154,142 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO, static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); /** - * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control + * DOC: product_name + * + * The amdgpu driver provides a sysfs API for reporting the product name + * for the device + * The file serial_number is used for this and returns the product name + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_product_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->product_name); +} + +static DEVICE_ATTR(product_name, S_IRUGO, + amdgpu_device_get_product_name, NULL); + +/** + * DOC: product_number + * + * The amdgpu driver provides a sysfs API for reporting the part number + * for the device + * The file serial_number is used for this and returns the part number + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_product_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->product_number); +} + +static DEVICE_ATTR(product_number, S_IRUGO, + amdgpu_device_get_product_number, NULL); + +/** + * DOC: serial_number + * + * The amdgpu driver provides a sysfs API for reporting the serial number + * for the device + * The file serial_number is used for this and returns the serial number + * as returned from the FRU. + * NOTE: This is only available for certain server cards + */ + +static ssize_t amdgpu_device_get_serial_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + return sysfs_emit(buf, "%s\n", adev->serial); +} + +static DEVICE_ATTR(serial_number, S_IRUGO, + amdgpu_device_get_serial_number, NULL); + +/** + * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control * * @dev: drm_device pointer * - * Returns true if the device is a dGPU with HG/PX power control, + * Returns true if the device is a dGPU with ATPX power control, * otherwise return false. */ -bool amdgpu_device_is_px(struct drm_device *dev) +bool amdgpu_device_supports_px(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); - if (adev->flags & AMD_IS_PX) + if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) return true; return false; } /** - * VRAM access helper functions. 
+ * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources * - * amdgpu_device_vram_access - read/write a buffer in vram + * @dev: drm_device pointer + * + * Returns true if the device is a dGPU with ACPI power control, + * otherwise return false. + */ +bool amdgpu_device_supports_boco(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + + if (adev->has_pr3 || + ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) + return true; + return false; +} + +/** + * amdgpu_device_supports_baco - Does the device support BACO + * + * @dev: drm_device pointer + * + * Returns true if the device supports BACO, + * otherwise return false. + */ +bool amdgpu_device_supports_baco(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + + return amdgpu_asic_supports_baco(adev); +} + +/** + * amdgpu_device_supports_smart_shift - Is the device a dGPU with + * smart shift support + * + * @dev: drm_device pointer + * + * Returns true if the device is a dGPU with Smart Shift support, + * otherwise returns false. + */ +bool amdgpu_device_supports_smart_shift(struct drm_device *dev) +{ + return (amdgpu_device_supports_boco(dev) && + amdgpu_acpi_is_power_shift_control_supported()); +} + +/* + * VRAM access helper functions + */ + +/** + * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA * * @adev: amdgpu_device pointer * @pos: offset of the buffer in vram @@ -164,30 +297,143 @@ bool amdgpu_device_is_px(struct drm_device *dev) * @size: read/write size, sizeof(@buf) must > @size * @write: true - write to vram, otherwise - read from vram */ -void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, - uint32_t *buf, size_t size, bool write) +void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) { - uint64_t last; unsigned long flags; + uint32_t hi = ~0, tmp = 0; + uint32_t *data = buf; + uint64_t last; + int idx; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return; + + BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4)); + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + for (last = pos + size; pos < last; pos += 4) { + tmp = pos >> 31; - last = size - 4; - for (last += pos; pos <= last; pos += 4) { - spin_lock_irqsave(&adev->mmio_idx_lock, flags); WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); + if (tmp != hi) { + WREG32_NO_KIQ(mmMM_INDEX_HI, tmp); + hi = tmp; + } if (write) - WREG32_NO_KIQ(mmMM_DATA, *buf++); + WREG32_NO_KIQ(mmMM_DATA, *data++); else - *buf++ = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + *data++ = RREG32_NO_KIQ(mmMM_DATA); + } + + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + drm_dev_exit(idx); +} + +/** + * amdgpu_device_aper_access - access vram through the vram aperture + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size, sizeof(@buf) must > @size + * @write: true - write to vram, otherwise - read from vram + * + * The return value means how many bytes have been transferred.
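amdgpu_device_mm_access() above funnels every dword through the MM_INDEX/MM_DATA window: bit 31 of MM_INDEX enables the aperture, the low 31 bits of the byte offset go into MM_INDEX, and the remaining high bits go into MM_INDEX_HI (the loop caches the high word so it is only rewritten when it changes). A stripped-down sketch of a single access, with the mmio_idx_lock and the drm_dev_enter() guard omitted for brevity; example_mm_window_rw() is a hypothetical name:

static void example_mm_window_rw(struct amdgpu_device *adev, loff_t pos,
                                 uint32_t *data, bool write)
{
        /* caller must hold adev->mmio_idx_lock in the real driver */
        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
        WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
        if (write)
                WREG32_NO_KIQ(mmMM_DATA, *data);
        else
                *data = RREG32_NO_KIQ(mmMM_DATA);
}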
+ */ +size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) +{ +#ifdef CONFIG_64BIT + void __iomem *addr; + size_t count = 0; + uint64_t last; + + if (!adev->mman.aper_base_kaddr) + return 0; + + last = min(pos + size, adev->gmc.visible_vram_size); + if (last > pos) { + addr = adev->mman.aper_base_kaddr + pos; + count = last - pos; + + if (write) { + memcpy_toio(addr, buf, count); + mb(); + amdgpu_device_flush_hdp(adev, NULL); + } else { + amdgpu_device_invalidate_hdp(adev, NULL); + mb(); + memcpy_fromio(buf, addr, count); + } + + } + + return count; +#else + return 0; +#endif +} + +/** + * amdgpu_device_vram_access - read/write a buffer in vram + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size, sizeof(@buf) must > @size + * @write: true - write to vram, otherwise - read from vram + */ +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) +{ + size_t count; + + /* try using the vram aperture to access vram first */ + count = amdgpu_device_aper_access(adev, pos, buf, size, write); + size -= count; + if (size) { + /* use MM to access the rest of vram */ + pos += count; + buf += count; + amdgpu_device_mm_access(adev, pos, buf, size, write); } } /* - * MMIO register access helper functions. + * register access helper functions. */ + +/* Check if hw access should be skipped because of hotplug or device error */ +bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev) +{ + if (adev->no_hw_access) + return true; + +#ifdef CONFIG_LOCKDEP + /* + * This is a bit complicated to understand, so worth a comment. What we assert + * here is that the GPU reset is not running on another thread in parallel. + * + * For this we trylock the read side of the reset semaphore; if that succeeds + * we know that the reset is not running in parallel. + * + * If the trylock fails we assert that we are either already holding the read + * side of the lock or are the reset thread itself and hold the write side of + * the lock. + */ + if (in_task()) { + if (down_read_trylock(&adev->reset_sem)) + up_read(&adev->reset_sem); + else + lockdep_assert_held(&adev->reset_sem); + } +#endif + return false; +} + /** - * amdgpu_mm_rreg - read a memory mapped IO register + * amdgpu_device_rreg - read a memory mapped IO or indirect register * * @adev: amdgpu_device pointer * @reg: dword aligned register offset @@ -195,25 +441,29 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, * * Returns the 32 bit value from the offset specified.
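amdgpu_device_vram_access() above composes the two paths: the CPU-visible BAR copy handles as much of the range as the visible aperture covers, and the indexed MMIO window mops up whatever is left. The same logic restated as a sketch; example_vram_rw() is a hypothetical name:

static void example_vram_rw(struct amdgpu_device *adev, loff_t pos,
                            void *buf, size_t size, bool write)
{
        /* fast path: memcpy through the BAR, may cover only part of the range */
        size_t done = amdgpu_device_aper_access(adev, pos, buf, size, write);

        /* slow path: MM_INDEX/MM_DATA window for the remainder */
        if (size - done)
                amdgpu_device_mm_access(adev, pos + done,
                                        (uint8_t *)buf + done,
                                        size - done, write);
}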
*/ -uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, - uint32_t acc_flags) +uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags) { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_rreg(adev, reg); - - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) - ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); - else { - unsigned long flags; + if (amdgpu_device_skip_hw_access(adev)) + return 0; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); - ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + if ((reg * 4) < adev->rmmio_size) { + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_sem)) { + ret = amdgpu_kiq_rreg(adev, reg); + up_read(&adev->reset_sem); + } else { + ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + ret = adev->pcie_rreg(adev, reg * 4); } - trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); + + trace_amdgpu_device_rreg(adev->pdev->device, reg, ret); + return ret; } @@ -231,7 +481,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, * * Returns the 8 bit value from the offset specified. */ -uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) { +uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) +{ + if (amdgpu_device_skip_hw_access(adev)) + return 0; + if (offset < adev->rmmio_size) return (readb(adev->rmmio + offset)); BUG(); @@ -252,7 +506,11 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) { * * Writes the value specified to the offset specified. */ -void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) { +void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) +{ + if (amdgpu_device_skip_hw_access(adev)) + return; + if (offset < adev->rmmio_size) writeb(value, adev->rmmio + offset); else @@ -260,7 +518,7 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) } /** - * amdgpu_mm_wreg - write to a memory mapped IO register + * amdgpu_device_wreg - write to a memory mapped IO or indirect register * * @adev: amdgpu_device pointer * @reg: dword aligned register offset @@ -269,76 +527,47 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) * * Writes the value specified to the offset specified. 
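amdgpu_device_rreg() above picks one of three paths: offsets beyond the MMIO BAR go through the indirect pcie_rreg callback, SR-IOV VFs at runtime route through the KIQ (but only when the read side of reset_sem can be taken, so a reset in flight falls back to plain MMIO), and everything else is a direct readl(). The dispatch condensed into a sketch; example_reg_read_dispatch() is a hypothetical name:

static uint32_t example_reg_read_dispatch(struct amdgpu_device *adev,
                                          uint32_t reg, uint32_t acc_flags)
{
        if ((reg * 4) >= adev->rmmio_size)
                return adev->pcie_rreg(adev, reg * 4);  /* indirect access */

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
            down_read_trylock(&adev->reset_sem)) {
                uint32_t v = amdgpu_kiq_rreg(adev, reg); /* VF: ask the KIQ */

                up_read(&adev->reset_sem);
                return v;
        }

        return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}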
*/ -void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, - uint32_t acc_flags) +void amdgpu_device_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags) { - trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { - adev->last_mm_index = v; - } - - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_wreg(adev, reg, v); - - if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) - writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); - else { - unsigned long flags; + if (amdgpu_device_skip_hw_access(adev)) + return; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); - writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + if ((reg * 4) < adev->rmmio_size) { + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && + amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_sem)) { + amdgpu_kiq_wreg(adev, reg, v); + up_read(&adev->reset_sem); + } else { + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + } + } else { + adev->pcie_wreg(adev, reg * 4, v); } - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); - } + trace_amdgpu_device_wreg(adev->pdev->device, reg, v); } -/** - * amdgpu_io_rreg - read an IO register - * - * @adev: amdgpu_device pointer - * @reg: dword aligned register offset - * - * Returns the 32 bit value from the offset specified. - */ -u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) -{ - if ((reg * 4) < adev->rio_mem_size) - return ioread32(adev->rio_mem + (reg * 4)); - else { - iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); - return ioread32(adev->rio_mem + (mmMM_DATA * 4)); - } -} - -/** - * amdgpu_io_wreg - write to an IO register - * - * @adev: amdgpu_device pointer - * @reg: dword aligned register offset - * @v: 32 bit value to write to the register +/* + * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range * - * Writes the value specified to the offset specified. 
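The write path above mirrors the read path one-for-one. Driver code rarely calls the dispatchers directly; the register macros used everywhere else expand to them. Roughly, with hypothetical EX_ names rather than a restatement of the exact definitions in amdgpu.h:

/* illustration only; the real RREG32/WREG32 macros take an implicit adev */
#define EX_RREG32(adev, reg)        amdgpu_device_rreg((adev), (reg), 0)
#define EX_WREG32(adev, reg, v)     amdgpu_device_wreg((adev), (reg), (v), 0)
#define EX_RREG32_NO_KIQ(adev, reg) \
        amdgpu_device_rreg((adev), (reg), AMDGPU_REGS_NO_KIQ)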
- */ -void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) + * this function is invoked only the debugfs register access + * */ +void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, + uint32_t reg, uint32_t v) { - if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { - adev->last_mm_index = v; - } - - if ((reg * 4) < adev->rio_mem_size) - iowrite32(v, adev->rio_mem + (reg * 4)); - else { - iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); - iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); - } + if (amdgpu_device_skip_hw_access(adev)) + return; - if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { - udelay(500); + if (amdgpu_sriov_fullaccess(adev) && + adev->gfx.rlc.funcs && + adev->gfx.rlc.funcs->is_rlcg_access_range) { + if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) + return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0); + } else { + writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); } } @@ -353,6 +582,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) */ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) { + if (amdgpu_device_skip_hw_access(adev)) + return 0; + if (index < adev->doorbell.num_doorbells) { return readl(adev->doorbell.ptr + index); } else { @@ -373,6 +605,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) */ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) { + if (amdgpu_device_skip_hw_access(adev)) + return; + if (index < adev->doorbell.num_doorbells) { writel(v, adev->doorbell.ptr + index); } else { @@ -391,6 +626,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) */ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) { + if (amdgpu_device_skip_hw_access(adev)) + return 0; + if (index < adev->doorbell.num_doorbells) { return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); } else { @@ -411,6 +649,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) */ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) { + if (amdgpu_device_skip_hw_access(adev)) + return; + if (index < adev->doorbell.num_doorbells) { atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); } else { @@ -419,9 +660,140 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) } /** + * amdgpu_device_indirect_rreg - read an indirect register + * + * @adev: amdgpu_device pointer + * @pcie_index: mmio register offset + * @pcie_data: mmio register offset + * @reg_addr: indirect register address to read from + * + * Returns the value of indirect register @reg_addr + */ +u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr) +{ + unsigned long flags; + u32 r; + void __iomem *pcie_index_offset; + void __iomem *pcie_data_offset; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + r = readl(pcie_data_offset); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + return r; +} + +/** + * amdgpu_device_indirect_rreg64 - read a 64bits indirect register + * + * @adev: amdgpu_device pointer + * @pcie_index: mmio register offset + * @pcie_data: mmio register offset + * @reg_addr: indirect register address to read from + * + * Returns the value of indirect register @reg_addr + */ +u64 amdgpu_device_indirect_rreg64(struct 
amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr) +{ + unsigned long flags; + u64 r; + void __iomem *pcie_index_offset; + void __iomem *pcie_data_offset; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + + /* read low 32 bits */ + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + r = readl(pcie_data_offset); + /* read high 32 bits */ + writel(reg_addr + 4, pcie_index_offset); + readl(pcie_index_offset); + r |= ((u64)readl(pcie_data_offset) << 32); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + return r; +} + +/** + * amdgpu_device_indirect_wreg - write an indirect register address + * + * @adev: amdgpu_device pointer + * @pcie_index: mmio register offset + * @pcie_data: mmio register offset + * @reg_addr: indirect register offset + * @reg_data: indirect register data + * + */ +void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr, u32 reg_data) +{ + unsigned long flags; + void __iomem *pcie_index_offset; + void __iomem *pcie_data_offset; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + writel(reg_data, pcie_data_offset); + readl(pcie_data_offset); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +/** + * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address + * + * @adev: amdgpu_device pointer + * @pcie_index: mmio register offset + * @pcie_data: mmio register offset + * @reg_addr: indirect register offset + * @reg_data: indirect register data + * + */ +void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, + u32 pcie_index, u32 pcie_data, + u32 reg_addr, u64 reg_data) +{ + unsigned long flags; + void __iomem *pcie_index_offset; + void __iomem *pcie_data_offset; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; + pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; + + /* write low 32 bits */ + writel(reg_addr, pcie_index_offset); + readl(pcie_index_offset); + writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset); + readl(pcie_data_offset); + /* write high 32 bits */ + writel(reg_addr + 4, pcie_index_offset); + readl(pcie_index_offset); + writel((u32)(reg_data >> 32), pcie_data_offset); + readl(pcie_data_offset); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +/** * amdgpu_invalid_rreg - dummy reg read function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @reg: offset of register * * Dummy register read function. Used for register blocks @@ -438,7 +810,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) /** * amdgpu_invalid_wreg - dummy reg write function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @reg: offset of register * @v: value to write to the register * @@ -455,7 +827,7 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32 /** * amdgpu_invalid_rreg64 - dummy 64 bit reg read function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @reg: offset of register * * Dummy register read function. 
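The 64-bit indirect accessors are just two 32-bit window operations back to back, low dword first; the readl() after each writel() forces the posted index write to land before the data port is touched. A lock-free sketch (the real functions hold pcie_idx_lock so the pair is atomic against other users of the window; example_indirect_rreg64() is a hypothetical name):

static u64 example_indirect_rreg64(struct amdgpu_device *adev,
                                   u32 pcie_index, u32 pcie_data, u32 reg_addr)
{
        void __iomem *idx = (void __iomem *)adev->rmmio + pcie_index * 4;
        void __iomem *dat = (void __iomem *)adev->rmmio + pcie_data * 4;
        u64 r;

        writel(reg_addr, idx);          /* select the low dword */
        readl(idx);                     /* flush the posted write */
        r = readl(dat);
        writel(reg_addr + 4, idx);      /* high dword lives 4 bytes up */
        readl(idx);
        r |= (u64)readl(dat) << 32;
        return r;
}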
Used for register blocks @@ -472,7 +844,7 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) /** * amdgpu_invalid_wreg64 - dummy reg write function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @reg: offset of register * @v: value to write to the register * @@ -489,7 +861,7 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint /** * amdgpu_block_invalid_rreg - dummy reg read function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @block: offset of instance * @reg: offset of register * @@ -509,7 +881,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, /** * amdgpu_block_invalid_wreg - dummy reg write function * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @block: offset of instance * @reg: offset of register * @v: value to write to the register @@ -527,9 +899,23 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, } /** + * amdgpu_device_asic_init - Wrapper for atom asic_init + * + * @adev: amdgpu_device pointer + * + * Does any asic specific work and then calls atom asic init. + */ +static int amdgpu_device_asic_init(struct amdgpu_device *adev) +{ + amdgpu_asic_pre_asic_init(adev); + + return amdgpu_atom_asic_init(adev->mode_info.atom_context); +} + +/** * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * * Allocates a scratch page of VRAM for use by various things in the * driver. @@ -546,7 +932,7 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) /** * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * * Frees the VRAM scratch page. */ @@ -607,6 +993,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); } +/** + * amdgpu_device_pci_reset - reset the GPU using generic PCI means + * + * @adev: amdgpu_device pointer + * + * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.). + */ +int amdgpu_device_pci_reset(struct amdgpu_device *adev) +{ + return pci_reset_function(adev->pdev); +} + /* * GPU doorbell aperture helpers function. 
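The dummy callbacks being re-documented here exist so that an indirect accessor an ASIC never wired up fails loudly instead of silently returning garbage. The pattern, sketched with hypothetical example_ names:

static uint32_t example_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();          /* a loud crash beats a silently wrong value */
        return 0;
}

static void example_wire_defaults(struct amdgpu_device *adev)
{
        /* every indirect accessor starts out invalid; ASIC-specific
         * init code overrides only the ones that actually exist */
        adev->pcie_rreg = &example_invalid_rreg;
}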
*/ @@ -783,8 +1181,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) */ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) { - u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size); - u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; + int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); struct pci_bus *root; struct resource *res; unsigned i; @@ -795,6 +1192,11 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* skip if the bios has already enabled large BAR */ + if (adev->gmc.real_vram_size && + (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) + return 0; + /* Check if the root BUS has 64bit memory resources */ root = adev->pdev->bus; while (root->parent) @@ -810,6 +1212,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (!res) return 0; + /* Limit the BAR size to what is available */ + rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, + rbar_size); + /* Disable memory decoding while we change the BAR addresses and size */ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); pci_write_config_word(adev->pdev, PCI_COMMAND, @@ -881,6 +1287,10 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) } } + /* Don't post if we need to reset whole hive on init */ + if (adev->gmc.xgmi.pending_reset) + return false; + if (adev->has_hw_reset) { adev->has_hw_reset = false; return true; @@ -903,15 +1313,16 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) /** * amdgpu_device_vga_set_decode - enable/disable vga decode * - * @cookie: amdgpu_device pointer + * @pdev: PCI device pointer * @state: enable/disable vga decode * * Enable/disable vga decode (all asics). * Returns VGA resource flags. */ -static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) +static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, + bool state) { - struct amdgpu_device *adev = cookie; + struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); amdgpu_asic_set_vga_state(adev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -969,7 +1380,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) { struct sysinfo si; - bool is_os_64 = (sizeof(void *) == 8) ? 
true : false; + bool is_os_64 = (sizeof(void *) == 8); uint64_t total_memory; uint64_t dram_size_seven_GB = 0x1B8000000; uint64_t dram_size_three_GB = 0xB8000000; @@ -1006,6 +1417,42 @@ def_value: adev->pm.smu_prv_buffer_size = 0; } +static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) +{ + if (!(adev->flags & AMD_IS_APU) || + adev->asic_type < CHIP_RAVEN) + return 0; + + switch (adev->asic_type) { + case CHIP_RAVEN: + if (adev->pdev->device == 0x15dd) + adev->apu_flags |= AMD_APU_IS_RAVEN; + if (adev->pdev->device == 0x15d8) + adev->apu_flags |= AMD_APU_IS_PICASSO; + break; + case CHIP_RENOIR: + if ((adev->pdev->device == 0x1636) || + (adev->pdev->device == 0x164c)) + adev->apu_flags |= AMD_APU_IS_RENOIR; + else + adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; + break; + case CHIP_VANGOGH: + adev->apu_flags |= AMD_APU_IS_VANGOGH; + break; + case CHIP_YELLOW_CARP: + break; + case CHIP_CYAN_SKILLFISH: + if (adev->pdev->device == 0x13FE) + adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; + break; + default: + return -EINVAL; + } + + return 0; +} + /** * amdgpu_device_check_arguments - validate module params * @@ -1016,8 +1463,6 @@ def_value: */ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) { - int ret = 0; - if (amdgpu_sched_jobs < 4) { dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); @@ -1049,6 +1494,16 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_vm_fragment_size = -1; } + if (amdgpu_sched_hw_submission < 2) { + dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", + amdgpu_sched_hw_submission); + amdgpu_sched_hw_submission = 2; + } else if (!is_power_of_2(amdgpu_sched_hw_submission)) { + dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", + amdgpu_sched_hw_submission); + amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission); + } + amdgpu_device_check_smu_prv_buffer_size(adev); amdgpu_device_check_vm_size(adev); @@ -1057,7 +1512,11 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); - return ret; + amdgpu_gmc_tmz_set(adev); + + amdgpu_gmc_noretry_set(adev); + + return 0; } /** @@ -1069,27 +1528,36 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) * Callback for the switcheroo driver. Suspends or resumes the * the asics before or after it is powered up using ACPI methods. 
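amdgpu_device_check_arguments() applies one consistent policy to module parameters: warn, then clamp to the nearest sane value rather than failing the probe. The new hw_submission check is a good example; a sketch of just that clamp, where the power-of-two rationale in the comment is an assumption about the (depth - 1) index masks used by the rings, not text from the patch:

static void example_clamp_hw_submission(struct amdgpu_device *adev)
{
        /* fence/ring bookkeeping masks indices with (depth - 1),
         * so the queue depth must be a power of two, minimum 2 */
        if (amdgpu_sched_hw_submission < 2) {
                dev_warn(adev->dev, "clamping hw submission to 2\n");
                amdgpu_sched_hw_submission = 2;
        } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
                amdgpu_sched_hw_submission =
                        roundup_pow_of_two(amdgpu_sched_hw_submission);
        }
}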
*/ -static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + int r; - if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF) + if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { - pr_info("amdgpu: switched on\n"); + pr_info("switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_resume(dev, true, true); + pci_set_power_state(pdev, PCI_D0); + amdgpu_device_load_pci_state(pdev); + r = pci_enable_device(pdev); + if (r) + DRM_WARN("pci_enable_device failed (%d)\n", r); + amdgpu_device_resume(dev, true); dev->switch_power_state = DRM_SWITCH_POWER_ON; - drm_kms_helper_poll_enable(dev); } else { - pr_info("amdgpu: switched off\n"); - drm_kms_helper_poll_disable(dev); + pr_info("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_device_suspend(dev, true, true); + amdgpu_device_suspend(dev, true); + amdgpu_device_cache_pci_state(pdev); + /* Shut down the device */ + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1112,7 +1580,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. */ - return dev->open_count == 0; + return atomic_read(&dev->open_count) == 0; } static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { @@ -1327,6 +1795,19 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, if (!ip_block_version) return -EINVAL; + switch (ip_block_version->type) { + case AMD_IP_BLOCK_TYPE_VCN: + if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) + return 0; + break; + case AMD_IP_BLOCK_TYPE_JPEG: + if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) + return 0; + break; + default: + break; + } + DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, ip_block_version->funcs->name); @@ -1352,8 +1833,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) adev->enable_virtual_display = false; if (amdgpu_virtual_display) { - struct drm_device *ddev = adev->ddev; - const char *pci_address_name = pci_name(ddev->pdev); + const char *pci_address_name = pci_name(adev->pdev); char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); @@ -1405,22 +1885,25 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) { const char *chip_name; - char fw_name[30]; + char fw_name[40]; int err; const struct gpu_info_firmware_header_v1_0 *hdr; adev->firmware.gpu_info_fw = NULL; + if (adev->mman.discovery_bin) { + amdgpu_discovery_get_gfx_info(adev); + + /* + * FIXME: The bounding box is still needed by Navi12, so + * temporarily read it from gpu_info firmware. Should be droped + * when DAL no longer needs it. 
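The reworked switcheroo OFF path is order-sensitive: the ASIC is suspended first, config space is cached next (D3cold wipes it), and only then is the device disabled and dropped to D3cold; the ON path runs the mirror image. The OFF ordering as a sketch; example_switcheroo_off() is a hypothetical name:

static void example_switcheroo_off(struct pci_dev *pdev, struct drm_device *dev)
{
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        amdgpu_device_suspend(dev, true);       /* quiesce the ASIC first */
        amdgpu_device_cache_pci_state(pdev);    /* config space dies in D3cold */
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3cold);
        dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}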
+ */ + if (adev->asic_type != CHIP_NAVI12) + return 0; + } + switch (adev->asic_type) { - case CHIP_TOPAZ: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - case CHIP_CARRIZO: - case CHIP_STONEY: #ifdef CONFIG_DRM_AMDGPU_SI case CHIP_VERDE: case CHIP_TAHITI: @@ -1435,7 +1918,21 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_KABINI: case CHIP_MULLINS: #endif + case CHIP_TOPAZ: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_CARRIZO: + case CHIP_STONEY: case CHIP_VEGA20: + case CHIP_ALDEBARAN: + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: default: return 0; case CHIP_VEGA10: @@ -1445,9 +1942,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "vega12"; break; case CHIP_RAVEN: - if (adev->rev_id >= 8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) chip_name = "picasso"; else chip_name = "raven"; @@ -1456,7 +1953,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_RENOIR: - chip_name = "renoir"; + if (adev->apu_flags & AMD_APU_IS_RENOIR) + chip_name = "renoir"; + else + chip_name = "green_sardine"; break; case CHIP_NAVI10: chip_name = "navi10"; @@ -1467,6 +1967,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_NAVI12: chip_name = "navi12"; break; + case CHIP_VANGOGH: + chip_name = "vangogh"; + break; + case CHIP_YELLOW_CARP: + chip_name = "yellow_carp"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -1495,7 +2001,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + /* + * Should be dropped when DAL no longer needs it. + */ + if (adev->asic_type == CHIP_NAVI12) goto parse_soc_bounding_box; adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); @@ -1527,10 +2036,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) } parse_soc_bounding_box: -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* * soc bounding box info is not integrated in discovery table, - we always need to parse it from gpu info firmware. + we always need to parse it from gpu info firmware if needed.
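Once a chip name is chosen, fetching the gpu_info blob is mechanical: build the filename, request the firmware, validate the ucode header. A condensed sketch of those steps as amdgpu_device_parse_gpu_info_fw() performs them, with the error unwinding trimmed; example_load_gpu_info() is a hypothetical name:

static int example_load_gpu_info(struct amdgpu_device *adev, const char *chip)
{
        char fw_name[40];
        int err;

        /* e.g. chip = "navi12" yields "amdgpu/navi12_gpu_info.bin" */
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip);
        err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
        if (err)
                return err;

        return amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
}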
*/ if (hdr->version_minor == 2) { const struct gpu_info_firmware_v1_2 *gpu_info_fw = @@ -1538,7 +2046,6 @@ parse_soc_bounding_box: le32_to_cpu(hdr->header.ucode_array_offset_bytes)); adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; } -#endif break; } default: @@ -1567,25 +2074,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_device_enable_virtual_display(adev); - switch (adev->asic_type) { - case CHIP_TOPAZ: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - case CHIP_CARRIZO: - case CHIP_STONEY: - if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) - adev->family = AMDGPU_FAMILY_CZ; - else - adev->family = AMDGPU_FAMILY_VI; - - r = vi_set_ip_blocks(adev); + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_request_full_gpu(adev, true); if (r) return r; - break; + } + + switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_SI case CHIP_VERDE: case CHIP_TAHITI: @@ -1604,64 +2099,48 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: - if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII)) - adev->family = AMDGPU_FAMILY_CI; - else + if (adev->flags & AMD_IS_APU) adev->family = AMDGPU_FAMILY_KV; + else + adev->family = AMDGPU_FAMILY_CI; r = cik_set_ip_blocks(adev); if (r) return r; break; #endif - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - if (adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_RENOIR) - adev->family = AMDGPU_FAMILY_RV; + case CHIP_TOPAZ: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_CARRIZO: + case CHIP_STONEY: + if (adev->flags & AMD_IS_APU) + adev->family = AMDGPU_FAMILY_CZ; else - adev->family = AMDGPU_FAMILY_AI; + adev->family = AMDGPU_FAMILY_VI; - r = soc15_set_ip_blocks(adev); + r = vi_set_ip_blocks(adev); if (r) return r; break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - adev->family = AMDGPU_FAMILY_NV; - - r = nv_set_ip_blocks(adev); + default: + r = amdgpu_discovery_set_ip_blocks(adev); if (r) return r; break; - default: - /* FIXME: not supported yet */ - return -EINVAL; } - r = amdgpu_device_parse_gpu_info_fw(adev); - if (r) - return r; - - if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) - amdgpu_discovery_get_gfx_info(adev); - amdgpu_amdkfd_device_probe(adev); - if (amdgpu_sriov_vf(adev)) { - r = amdgpu_virt_request_full_gpu(adev, true); - if (r) - return -EAGAIN; - } - adev->pm.pp_feature = amdgpu_pp_feature_mask; if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { @@ -1686,6 +2165,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } /* get the vbios after the asic_funcs are set up */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { + r = amdgpu_device_parse_gpu_info_fw(adev); + if (r) + return r; + /* Read BIOS */ if (!amdgpu_get_bios(adev)) return -EINVAL; @@ -1696,6 +2179,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); return r; } + + /*get pf2vf 
msg info at it's earliest time*/ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + } } @@ -1762,11 +2250,14 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) continue; + if (!adev->ip_blocks[i].status.sw) + continue; + /* no need to do the fw loading again if already done*/ if (adev->ip_blocks[i].status.hw == true) break; - if (adev->in_gpu_reset || adev->in_suspend) { + if (amdgpu_in_reset(adev) || adev->in_suspend) { r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { DRM_ERROR("resume of IP block <%s> failed %d\n", @@ -1787,7 +2278,8 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) } } - r = amdgpu_pm_load_smu_firmware(adev, &smu_version); + if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) + r = amdgpu_pm_load_smu_firmware(adev, &smu_version); return r; } @@ -1854,6 +2346,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); @@ -1882,24 +2377,36 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) * it should be called after amdgpu_device_ip_hw_init_phase2 since * for some ASICs the RAS EEPROM code relies on SMU fully functioning * for I2C communication which only true at this point. - * recovery_init may fail, but it can free all resources allocated by - * itself and its failure should not stop amdgpu init process. + * + * amdgpu_ras_recovery_init may fail, but the upper only cares the + * failure from bad gpu situation and stop amdgpu init process + * accordingly. For other failed cases, it will still release all + * the resource and print error message, rather than returning one + * negative value to upper level. * * Note: theoretically, this should be called before all vram allocations * to protect retired page from abusing */ - amdgpu_ras_recovery_init(adev); + r = amdgpu_ras_recovery_init(adev); + if (r) + goto init_failed; if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_add_device(adev); - amdgpu_amdkfd_device_init(adev); + + /* Don't init kfd if whole hive need to be reset during init */ + if (!adev->gmc.xgmi.pending_reset) + amdgpu_amdkfd_device_init(adev); + + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + goto init_failed; + + amdgpu_fru_get_product_info(adev); init_failed: - if (amdgpu_sriov_vf(adev)) { - if (!r) - amdgpu_virt_init_data_exchange(adev); + if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, true); - } return r; } @@ -1930,14 +2437,31 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) */ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) { - return !!memcmp(adev->gart.ptr, adev->reset_magic, - AMDGPU_RESET_MAGIC_NUM); + if (memcmp(adev->gart.ptr, adev->reset_magic, + AMDGPU_RESET_MAGIC_NUM)) + return true; + + if (!amdgpu_in_reset(adev)) + return false; + + /* + * For all ASICs with baco/mode1 reset, the VRAM is + * always assumed to be lost. + */ + switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_BACO: + case AMD_RESET_METHOD_MODE1: + return true; + default: + return false; + } } /** * amdgpu_device_set_cg_state - set clockgating for amdgpu device * * @adev: amdgpu_device pointer + * @state: clockgating state (gate or ungate) * * The list of all the hardware IPs that make up the asic is walked and the * set_clockgating_state callbacks are run. 
@@ -1946,8 +2470,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * Returns 0 on success, negative error code on failure. */ -static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, - enum amd_clockgating_state state) +int amdgpu_device_set_cg_state(struct amdgpu_device *adev, + enum amd_clockgating_state state) { int i, j, r; @@ -1958,10 +2482,15 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip CG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, @@ -1977,7 +2506,8 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, return 0; } -static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state) +int amdgpu_device_set_pg_state(struct amdgpu_device *adev, + enum amd_powergating_state state) { int i, j, r; @@ -1988,10 +2518,15 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.late_initialized) continue; + /* skip PG for GFX on S0ix */ + if (adev->in_s0ix && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) + continue; /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, @@ -2026,9 +2561,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void) gpu_ins = &(mgpu_info.gpu_ins[i]); adev = gpu_ins->adev; if (!(adev->flags & AMD_IS_APU) && - !gpu_ins->mgpu_fan_enabled && - adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->enable_mgpu_fan_boost) { + !gpu_ins->mgpu_fan_enabled) { ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); if (ret) break; @@ -2074,6 +2607,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = true; } + amdgpu_ras_set_error_query_ready(adev, true); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); @@ -2083,6 +2618,11 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (r) DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); + /* For XGMI + passthrough configuration on arcturus, enable light SBR */ + if (adev->asic_type == CHIP_ARCTURUS && + amdgpu_passthrough(adev) && + adev->gmc.xgmi.num_physical_nodes > 1) + smu_set_light_sbr(&adev->smu, true); if (adev->gmc.xgmi.num_physical_nodes > 1) { mutex_lock(&mgpu_info.mutex); @@ -2106,7 +2646,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if 
(gpu_instance->adev->flags & AMD_IS_APU) continue; - r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); + r = amdgpu_xgmi_set_pstate(gpu_instance->adev, + AMDGPU_XGMI_PSTATE_MIN); if (r) { DRM_ERROR("pstate setting failed (%d).\n", r); break; @@ -2120,27 +2661,22 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } -/** - * amdgpu_device_ip_fini - run fini for hardware IPs - * - * @adev: amdgpu_device pointer - * - * Main teardown pass for hardware IPs. The list of all the hardware - * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks - * are run. hw_fini tears down the hardware associated with each IP - * and sw_fini tears down any software state associated with each IP. - * Returns 0 on success, negative error code on failure. - */ -static int amdgpu_device_ip_fini(struct amdgpu_device *adev) +static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) { int i, r; - amdgpu_ras_pre_fini(adev); + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].version->funcs->early_fini) + continue; - if (adev->gmc.xgmi.num_physical_nodes > 1) - amdgpu_xgmi_remove_device(adev); + r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); + if (r) { + DRM_DEBUG("early_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } + } - amdgpu_amdkfd_device_fini(adev); + amdgpu_amdkfd_suspend(adev, false); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); @@ -2175,6 +2711,38 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_release_full_gpu(adev, false)) + DRM_ERROR("failed to release exclusive mode on fini\n"); + } + + return 0; +} + +/** + * amdgpu_device_ip_fini - run fini for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main teardown pass for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks + * are run. hw_fini tears down the hardware associated with each IP + * and sw_fini tears down any software state associated with each IP. + * Returns 0 on success, negative error code on failure. 
+ */ +static int amdgpu_device_ip_fini(struct amdgpu_device *adev) +{ + int i, r; + + if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) + amdgpu_virt_release_ras_err_handler_data(adev); + + amdgpu_ras_pre_fini(adev); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_remove_device(adev); + + amdgpu_amdkfd_device_fini_sw(adev); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.sw) @@ -2208,10 +2776,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ras_fini(adev); - if (amdgpu_sriov_vf(adev)) - if (amdgpu_virt_release_full_gpu(adev, false)) - DRM_ERROR("failed to release exclusive mode on fini\n"); - return 0; } @@ -2236,12 +2800,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); - mutex_lock(&adev->gfx.gfx_off_mutex); - if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } - mutex_unlock(&adev->gfx.gfx_off_mutex); + WARN_ON_ONCE(adev->gfx.gfx_off_state); + WARN_ON_ONCE(adev->gfx.gfx_off_req_count); + + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; } /** @@ -2265,18 +2828,21 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; + /* displays are handled separately */ - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { - /* XXX handle errors */ - r = adev->ip_blocks[i].version->funcs->suspend(adev); - /* XXX handle errors */ - if (r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - adev->ip_blocks[i].status.hw = false; + if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) + continue; + + /* XXX handle errors */ + r = adev->ip_blocks[i].version->funcs->suspend(adev); + /* XXX handle errors */ + if (r) { + DRM_ERROR("suspend of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; } + + adev->ip_blocks[i].status.hw = false; } return 0; @@ -2297,6 +2863,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { int i, r; + if (adev->in_s0ix) + amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2309,6 +2878,27 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; continue; } + + /* skip unnecessary suspend if we do not initialize them yet */ + if (adev->gmc.xgmi.pending_reset && + !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { + adev->ip_blocks[i].status.hw = false; + continue; + } + + /* skip suspend of gfx and psp for S0ix + * gfx is in gfxoff state, so on resume it will exit gfxoff just + * like at runtime. PSP is also part of the always on hardware + * so no need to suspend it. 
+ */ + if (adev->in_s0ix && + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) + continue; + /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -2318,23 +2908,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } adev->ip_blocks[i].status.hw = false; /* handle putting the SMC in the appropriate state */ - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - if (is_support_sw_smu(adev)) { - r = smu_set_mp1_state(&adev->smu, adev->mp1_state); - } else if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_mp1_state) { - r = adev->powerplay.pp_funcs->set_mp1_state( - adev->powerplay.pp_handle, - adev->mp1_state); - } - if (r) { - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", - adev->mp1_state, r); - return r; + if(!amdgpu_sriov_vf(adev)){ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); + if (r) { + DRM_ERROR("SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); + return r; + } } } - - adev->ip_blocks[i].status.hw = false; } return 0; @@ -2355,8 +2938,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_request_full_gpu(adev, false); + } r = amdgpu_device_ip_suspend_phase1(adev); if (r) @@ -2380,15 +2965,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_IH, }; - for (i = 0; i < ARRAY_SIZE(ip_order); i++) { + for (i = 0; i < adev->num_ip_blocks; i++) { int j; struct amdgpu_ip_block *block; - for (j = 0; j < adev->num_ip_blocks; j++) { - block = &adev->ip_blocks[j]; + block = &adev->ip_blocks[i]; + block->status.hw = false; - block->status.hw = false; - if (block->version->type != ip_order[i] || + for (j = 0; j < ARRAY_SIZE(ip_order); j++) { + + if (block->version->type != ip_order[j] || !block->status.valid) continue; @@ -2413,7 +2999,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, AMD_IP_BLOCK_TYPE_UVD, - AMD_IP_BLOCK_TYPE_VCE + AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_VCN }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -2428,7 +3015,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block->status.hw) continue; - r = block->version->funcs->hw_init(adev); + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) + r = block->version->funcs->resume(adev); + else + r = block->version->funcs->hw_init(adev); + DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; @@ -2528,6 +3119,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + return r; + r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; @@ -2552,7 +3147,7 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { if (adev->is_atom_fw) { - if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) + if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } else { if (amdgpu_atombios_has_gpu_virtualization_table(adev)) @@ -2576,6 +3171,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) +#if 
defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: +#endif case CHIP_BONAIRE: case CHIP_KAVERI: case CHIP_KABINI: @@ -2600,49 +3201,93 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: + case CHIP_CYAN_SKILLFISH: + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: #endif + default: return amdgpu_dc != 0; -#endif +#else default: + if (amdgpu_dc > 0) + DRM_INFO_ONCE("Display Core has been requested via kernel parameter " + "but isn't supported by ASIC, ignoring\n"); return false; +#endif } } /** * amdgpu_device_has_dc_support - check if dc is supported * - * @adev: amdgpu_device_pointer + * @adev: amdgpu_device pointer * * Returns true for supported, false for not supported */ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) || + adev->enable_virtual_display || + (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) return false; return amdgpu_device_asic_has_dc_support(adev->asic_type); } - static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) { struct amdgpu_device *adev = container_of(__work, struct amdgpu_device, xgmi_reset_work); + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); - adev->asic_reset_res = amdgpu_asic_reset(adev); + /* It's a bug to not have a hive within this function */ + if (WARN_ON(!hive)) + return; + + /* + * Use task barrier to synchronize all xgmi reset works across the + * hive. task_barrier_enter and task_barrier_exit will block + * until all the threads running the xgmi reset works reach + * those points. task_barrier_full will do both blocks. + */ + if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + + task_barrier_enter(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); + + if (adev->asic_reset_res) + goto fail; + + task_barrier_exit(&hive->tb); + adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); + + if (adev->asic_reset_res) + goto fail; + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_count) + adev->mmhub.ras_funcs->reset_ras_error_count(adev); + } else { + + task_barrier_full(&hive->tb); + adev->asic_reset_res = amdgpu_asic_reset(adev); + } + +fail: if (adev->asic_reset_res) DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", - adev->asic_reset_res, adev->ddev->unique); + adev->asic_reset_res, adev_to_drm(adev)->unique); + amdgpu_put_xgmi_hive(hive); } static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) @@ -2654,17 +3299,18 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) int ret = 0; /* - * By default timeout for non compute jobs is 10000. - * And there is no timeout enforced on compute jobs. + * By default timeout for non compute jobs is 10000 + * and 60000 for compute jobs. * In SR-IOV or passthrough mode, timeout for compute - * jobs are 10000 by default. + * jobs are 60000 by default. 
*/ adev->gfx_timeout = msecs_to_jiffies(10000); adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) - adev->compute_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev)) + adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? + msecs_to_jiffies(60000) : msecs_to_jiffies(10000); else - adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + adev->compute_timeout = msecs_to_jiffies(60000); if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { while ((timeout_setting = strsep(&input, ",")) && @@ -2678,6 +3324,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) continue; } else if (timeout < 0) { timeout = MAX_SCHEDULE_TIMEOUT; + dev_warn(adev->dev, "lockup timeout disabled"); + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); } else { timeout = msecs_to_jiffies(timeout); } @@ -2713,12 +3361,18 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) return ret; } +static const struct attribute *amdgpu_dev_attributes[] = { + &dev_attr_product_name.attr, + &dev_attr_product_number.attr, + &dev_attr_serial_number.attr, + &dev_attr_pcie_replay_count.attr, + NULL +}; + /** * amdgpu_device_init - initialize the driver * * @adev: amdgpu_device pointer - * @ddev: drm dev pointer - * @pdev: pci dev pointer * @flags: driver flags * * Initializes the driver info and hw (all asics). @@ -2726,18 +3380,15 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) * Called at driver startup. */ int amdgpu_device_init(struct amdgpu_device *adev, - struct drm_device *ddev, - struct pci_dev *pdev, uint32_t flags) { + struct drm_device *ddev = adev_to_drm(adev); + struct pci_dev *pdev = adev->pdev; int r, i; - bool runtime = false; + bool px = false; u32 max_MBps; adev->shutdown = false; - adev->dev = &pdev->dev; - adev->ddev = ddev; - adev->pdev = pdev; adev->flags = flags; if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) @@ -2747,15 +3398,16 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; if (amdgpu_emu_mode == 1) - adev->usec_timeout *= 2; + adev->usec_timeout *= 10; adev->gmc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_num_rqs = 0; + adev->vm_manager.vm_pte_num_scheds = 0; adev->gmc.gmc_funcs = NULL; + adev->harvest_ip_mask = 0x0; adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -2782,7 +3434,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - atomic_set(&adev->irq.ih.lock, 0); mutex_init(&adev->firmware.mutex); mutex_init(&adev->pm.mutex); mutex_init(&adev->gfx.gpu_clock_mutex); @@ -2793,10 +3444,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); - mutex_init(&adev->lock_reset); - mutex_init(&adev->notifier_lock); - mutex_init(&adev->virt.dpm_mutex); + atomic_set(&adev->in_gpu_reset, 0); + init_rwsem(&adev->reset_sem); mutex_init(&adev->psp.mutex); + mutex_init(&adev->notifier_lock); + + r = amdgpu_device_init_apu_flags(adev); + if (r) + return r; r = amdgpu_device_check_arguments(adev); if (r) @@ -2815,8 +3470,7 @@ int 
amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->shadow_list); mutex_init(&adev->shadow_list_lock); - INIT_LIST_HEAD(&adev->ring_lru_list); - spin_lock_init(&adev->ring_lru_list_lock); + INIT_LIST_HEAD(&adev->reset_list); INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); @@ -2826,7 +3480,18 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); adev->gfx.gfx_off_req_count = 1; - adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false; + adev->pm.ac_power = power_supply_is_system_supplied() > 0; + + atomic_set(&adev->throttling_logging_enabled, 1); + /* + * If throttling continues, logging will be performed every minute + * to avoid log flooding. "-1" is subtracted since the thermal + * throttling interrupt comes every second. Thus, the total logging + * interval is 59 seconds (ratelimited printk interval) + 1 (waiting + * for throttling interrupt) = 60 seconds. + */ + ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); + ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); /* Registers mapping */ /* TODO: block userspace mapping of io register */ @@ -2845,28 +3510,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* io port mapping */ - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { - adev->rio_mem_size = pci_resource_len(adev->pdev, i); - adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size); - break; - } - } - if (adev->rio_mem == NULL) - DRM_INFO("PCI I/O BAR is not found.\n"); - - /* enable PCIE atomic ops */ - r = pci_enable_atomic_ops_to_root(adev->pdev, - PCI_EXP_DEVCAP2_ATOMIC_COMP32 | - PCI_EXP_DEVCAP2_ATOMIC_COMP64); - if (r) { - adev->have_atomics_support = false; - DRM_INFO("PCIE atomic ops is not supported\n"); - } else { - adev->have_atomics_support = true; - } - amdgpu_device_get_pcie_info(adev); if (amdgpu_mcbp) @@ -2875,12 +3518,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10) adev->enable_mes = true; - if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) { - r = amdgpu_discovery_init(adev); - if (r) { - dev_err(adev->dev, "amdgpu_discovery_init failed\n"); - return r; - } + /* detect hw virtualization here */ + amdgpu_detect_virtualization(adev); + + r = amdgpu_device_get_job_timeout_settings(adev); + if (r) { + dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); + return r; } /* early init functions */ @@ -2888,34 +3532,30 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; - r = amdgpu_device_get_job_timeout_settings(adev); - if (r) { - dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - return r; - } + /* enable PCIE atomic ops */ + if (amdgpu_sriov_vf(adev)) + adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) + adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags == + (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + else + adev->have_atomics_support = + !pci_enable_atomic_ops_to_root(adev->pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (!adev->have_atomics_support) + dev_info(adev->dev, "PCIE atomic ops is not supported\n"); /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev);
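The throttling-log hunk above deserves a brief aside: ratelimit_state_init(rs, interval, burst) allows at most "burst" messages per "interval", and because the thermal interrupt fires once per second, an interval of (60 - 1) * HZ plus the one-second wait for the next interrupt yields one log line per minute. A minimal standalone sketch of the same pattern follows; the example_* names are illustrative and not part of the driver:

#include <linux/ratelimit.h>

/* Illustrative sketch of the 59s + 1s throttling-log rate limit above. */
static struct ratelimit_state example_throttle_rs;

static void example_throttle_log_init(void)
{
	/* One message per (60 - 1) * HZ; the per-second thermal interrupt
	 * supplies the remaining second, so logs land once per minute. */
	ratelimit_state_init(&example_throttle_rs, (60 - 1) * HZ, 1);
	ratelimit_set_flags(&example_throttle_rs, RATELIMIT_MSG_ON_RELEASE);
}

static void example_on_thermal_interrupt(void)
{
	if (__ratelimit(&example_throttle_rs))
		pr_warn("GPU is thermally throttled!\n");
}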
- /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ - /* this will fail for cards that aren't VGA class devices, just - * ignore it */ - vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); - - if (amdgpu_device_is_px(ddev)) - runtime = true; - if (!pci_is_thunderbolt_attached(adev->pdev)) - vga_switcheroo_register_client(adev->pdev, - &amdgpu_switcheroo_ops, runtime); - if (runtime) - vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (amdgpu_emu_mode == 1) { /* post the asic on emulation mode */ emu_soc_asic_init(adev); goto fence_driver_init; } + amdgpu_reset_init(adev); + /* detect if we are with an SRIOV vbios */ amdgpu_device_detect_sriov_bios(adev); @@ -2923,13 +3563,33 @@ int amdgpu_device_init(struct amdgpu_device *adev, * E.g., driver was not cleanly unloaded previously, etc. */ if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { - r = amdgpu_asic_reset(adev); - if (r) { - dev_err(adev->dev, "asic reset on init failed\n"); - goto failed; + if (adev->gmc.xgmi.num_physical_nodes) { + dev_info(adev->dev, "Pending hive reset.\n"); + adev->gmc.xgmi.pending_reset = true; + /* Only need to init necessary block for SMU to handle the reset */ + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { + DRM_DEBUG("IP %s disabled for hw_init.\n", + adev->ip_blocks[i].version->funcs->name); + adev->ip_blocks[i].status.hw = true; + } + } + } else { + r = amdgpu_asic_reset(adev); + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; + } } } + pci_enable_pcie_error_reporting(adev->pdev); + /* Post card if necessary */ if (amdgpu_device_need_post(adev)) { if (!adev->bios) { @@ -2938,7 +3598,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } DRM_INFO("GPU posting now...\n"); - r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_device_asic_init(adev); if (r) { dev_err(adev->dev, "gpu post error!\n"); goto failed; @@ -2968,15 +3628,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, fence_driver_init: /* Fence driver */ - r = amdgpu_fence_driver_init(adev); + r = amdgpu_fence_driver_sw_init(adev); if (r) { - dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); + dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); goto failed; } /* init the mode config */ - drm_mode_config_init(adev->ddev); + drm_mode_config_init(adev_to_drm(adev)); r = amdgpu_device_ip_init(adev); if (r) { @@ -2990,15 +3650,22 @@ fence_driver_init: adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; adev->virt.ops = NULL; r = -EAGAIN; - goto failed; + goto release_ras_con; } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - if (amdgpu_virt_request_full_gpu(adev, false)) - amdgpu_virt_release_full_gpu(adev, false); - goto failed; + goto release_ras_con; } + amdgpu_fence_driver_hw_init(adev); + + dev_info(adev->dev, + "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", + adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_cu_per_sh, + adev->gfx.cu_info.number); + adev->accel_working = true; 
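One idiom this patch uses repeatedly (in the pending-hive-reset hunk above, and again in suspend phase 2 further down) is walking adev->ip_blocks[] and acting only on a whitelist of block types. Below is a condensed sketch of the hive-reset filter; the example_* helpers are hypothetical and assume the driver's own amdgpu.h types:

/* Hypothetical helper: true for the minimal set of IP blocks that must be
 * brought up before an XGMI hive-wide reset (COMMON, GMC, IH, SMC). */
static bool example_needed_before_hive_reset(struct amdgpu_ip_block *block)
{
	switch (block->version->type) {
	case AMD_IP_BLOCK_TYPE_COMMON:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_IH:
	case AMD_IP_BLOCK_TYPE_SMC:
		return true;
	default:
		return false;
	}
}

/* Usage, mirroring the hunk above: every other valid block is flagged
 * status.hw = true so that the subsequent hw_init pass skips it. */
static void example_mask_blocks_for_hive_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (!example_needed_before_hive_reset(&adev->ip_blocks[i]))
			adev->ip_blocks[i].status.hw = true;
	}
}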
amdgpu_vm_check_compute_bug(adev); @@ -3013,32 +3680,19 @@ fence_driver_init: amdgpu_fbdev_init(adev); - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) - amdgpu_pm_virt_sysfs_init(adev); - r = amdgpu_pm_sysfs_init(adev); - if (r) + if (r) { + adev->pm_sysfs_en = false; DRM_ERROR("registering pm debugfs failed (%d).\n", r); + } else + adev->pm_sysfs_en = true; r = amdgpu_ucode_sysfs_init(adev); - if (r) + if (r) { + adev->ucode_sysfs_en = false; DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); - - r = amdgpu_debugfs_gem_init(adev); - if (r) - DRM_ERROR("registering gem debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_regs_init(adev); - if (r) - DRM_ERROR("registering register debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_firmware_init(adev); - if (r) - DRM_ERROR("registering firmware debugfs failed (%d).\n", r); - - r = amdgpu_debugfs_init(adev); - if (r) - DRM_ERROR("Creating debugfs files failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; if ((amdgpu_testing & 1)) { if (adev->accel_working) @@ -3063,40 +3717,84 @@ fence_driver_init: /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. */ - r = amdgpu_device_ip_late_init(adev); - if (r) { - dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); - goto failed; + if (!adev->gmc.xgmi.pending_reset) { + r = amdgpu_device_ip_late_init(adev); + if (r) { + dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); + goto release_ras_con; + } + /* must succeed. */ + amdgpu_ras_resume(adev); + queue_delayed_work(system_wq, &adev->delayed_init_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); } - /* must succeed. 
*/ - amdgpu_ras_resume(adev); - - queue_delayed_work(system_wq, &adev->delayed_init_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); + if (amdgpu_sriov_vf(adev)) + flush_delayed_work(&adev->delayed_init_work); - r = device_create_file(adev->dev, &dev_attr_pcie_replay_count); - if (r) { - dev_err(adev->dev, "Could not create pcie_replay_count"); - return r; - } + r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); + if (r) + dev_err(adev->dev, "Could not create amdgpu device attr\n"); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); if (r) dev_err(adev->dev, "amdgpu_pmu_init failed\n"); + /* Have stored pci confspace at hand for restore in sudden PCI error */ + if (amdgpu_device_cache_pci_state(adev->pdev)) + pci_restore_state(pdev); + + /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ + /* this will fail for cards that aren't VGA class devices, just + * ignore it */ + if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); + + if (amdgpu_device_supports_px(ddev)) { + px = true; + vga_switcheroo_register_client(adev->pdev, + &amdgpu_switcheroo_ops, px); + vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); + } + + if (adev->gmc.xgmi.pending_reset) + queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); + return 0; +release_ras_con: + amdgpu_release_ras_context(adev); + failed: amdgpu_vf_error_trans_all(adev); - if (runtime) - vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; } +static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) +{ + /* Clear all CPU mappings pointing to this device */ + unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); + + /* Unmap all mapped bars - Doorbell, registers and VRAM */ + amdgpu_device_doorbell_fini(adev); + + iounmap(adev->rmmio); + adev->rmmio = NULL; + if (adev->mman.aper_base_kaddr) + iounmap(adev->mman.aper_base_kaddr); + adev->mman.aper_base_kaddr = NULL; + + /* Memory manager related */ + if (!adev->gmc.xgmi.connected_to_cpu) { + arch_phys_wc_del(adev->gmc.vram_mtrr); + arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + } +} + /** * amdgpu_device_fini - tear down the driver * @@ -3105,31 +3803,63 @@ failed: * Tear down the driver info (all asics). * Called at driver shutdown. 
*/ -void amdgpu_device_fini(struct amdgpu_device *adev) +void amdgpu_device_fini_hw(struct amdgpu_device *adev) { - int r; - - DRM_INFO("amdgpu: finishing device.\n"); + dev_info(adev->dev, "amdgpu: finishing device.\n"); flush_delayed_work(&adev->delayed_init_work); + if (adev->mman.initialized) { + flush_delayed_work(&adev->mman.bdev.wq); + ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + } adev->shutdown = true; + /* make sure IB test finished before entering exclusive mode + * to avoid preemption on IB test + * */ + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_request_full_gpu(adev, false); + amdgpu_virt_fini_data_exchange(adev); + } + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ if (!amdgpu_device_has_dc_support(adev)) - drm_helper_force_disable_all(adev->ddev); + drm_helper_force_disable_all(adev_to_drm(adev)); else - drm_atomic_helper_shutdown(adev->ddev); + drm_atomic_helper_shutdown(adev_to_drm(adev)); } - amdgpu_fence_driver_fini(adev); - amdgpu_pm_sysfs_fini(adev); + amdgpu_fence_driver_hw_fini(adev); + + if (adev->pm_sysfs_en) + amdgpu_pm_sysfs_fini(adev); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); + amdgpu_fbdev_fini(adev); - r = amdgpu_device_ip_fini(adev); - if (adev->firmware.gpu_info_fw) { - release_firmware(adev->firmware.gpu_info_fw); - adev->firmware.gpu_info_fw = NULL; - } + + amdgpu_device_ip_fini_early(adev); + + amdgpu_irq_fini_hw(adev); + + ttm_device_clear_dma_mappings(&adev->mman.bdev); + + amdgpu_gart_dummy_page_fini(adev); + + amdgpu_device_unmap_mmio(adev); +} + +void amdgpu_device_fini_sw(struct amdgpu_device *adev) +{ + amdgpu_fence_driver_sw_fini(adev); + amdgpu_device_ip_fini(adev); + release_firmware(adev->firmware.gpu_info_fw); + adev->firmware.gpu_info_fw = NULL; adev->accel_working = false; + + amdgpu_reset_fini(adev); + /* free i2c buses */ if (!amdgpu_device_has_dc_support(adev)) amdgpu_i2c_fini(adev); @@ -3139,30 +3869,41 @@ void amdgpu_device_fini(struct amdgpu_device *adev) kfree(adev->bios); adev->bios = NULL; - if (!pci_is_thunderbolt_attached(adev->pdev)) + if (amdgpu_device_supports_px(adev_to_drm(adev))) { vga_switcheroo_unregister_client(adev->pdev); - if (adev->flags & AMD_IS_PX) vga_switcheroo_fini_domain_pm_ops(adev->dev); - vga_client_register(adev->pdev, NULL, NULL, NULL); - if (adev->rio_mem) - pci_iounmap(adev->pdev, adev->rio_mem); - adev->rio_mem = NULL; - iounmap(adev->rmmio); - adev->rmmio = NULL; - amdgpu_device_doorbell_fini(adev); - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) - amdgpu_pm_virt_sysfs_fini(adev); + } + if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + vga_client_unregister(adev->pdev); - amdgpu_debugfs_regs_cleanup(adev); - device_remove_file(adev->dev, &dev_attr_pcie_replay_count); - amdgpu_ucode_sysfs_fini(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); - amdgpu_debugfs_preempt_cleanup(adev); - if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + if (adev->mman.discovery_bin) amdgpu_discovery_fini(adev); + + kfree(adev->pci_state); + } +/** + * amdgpu_device_evict_resources - evict device resources + * @adev: amdgpu device object + * + * Evicts all ttm device resources(vram BOs, gart table) from the lru list + * of the vram memory type. Mainly used for evicting device resources + * at suspend time. 
+ * + */ +static void amdgpu_device_evict_resources(struct amdgpu_device *adev) +{ + /* No need to evict vram on APUs for suspend to ram */ + if (adev->in_s3 && (adev->flags & AMD_IS_APU)) + return; + + if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) + DRM_WARN("evicting device resources failed\n"); + +} /* * Suspend & resume. @@ -3171,31 +3912,24 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * amdgpu_device_suspend - initiate device suspend * * @dev: drm dev pointer - * @suspend: suspend state * @fbcon : notify the fbdev of suspend * * Puts the hw in the suspend state (all asics). * Returns 0 for success or an error on failure. * Called at driver suspend. */ -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { - struct amdgpu_device *adev; - struct drm_crtc *crtc; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - int r; - - if (dev == NULL || dev->dev_private == NULL) { - return -ENODEV; - } - - adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; adev->in_suspend = true; + + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) + DRM_WARN("smart shift update failed\n"); + drm_kms_helper_poll_disable(dev); if (fbcon) @@ -3203,70 +3937,23 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) cancel_delayed_work_sync(&adev->delayed_init_work); - if (!amdgpu_device_has_dc_support(adev)) { - /* turn off display hw */ - drm_modeset_lock_all(dev); - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_OFF); - drm_connector_list_iter_end(&iter); - drm_modeset_unlock_all(dev); - /* unpin the front buffers and cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct drm_framebuffer *fb = crtc->primary->fb; - struct amdgpu_bo *robj; - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - amdgpu_bo_unpin(aobj); - amdgpu_bo_unreserve(aobj); - } - } - - if (fb == NULL || fb->obj[0] == NULL) { - continue; - } - robj = gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects */ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } - } - } - } - - amdgpu_amdkfd_suspend(adev); - amdgpu_ras_suspend(adev); - r = amdgpu_device_ip_suspend_phase1(adev); + amdgpu_device_ip_suspend_phase1(adev); - /* evict vram memory */ - amdgpu_bo_evict_vram(adev); + if (!adev->in_s0ix) + amdgpu_amdkfd_suspend(adev, adev->in_runpm); - amdgpu_fence_driver_suspend(adev); + /* First evict vram memory */ + amdgpu_device_evict_resources(adev); - r = amdgpu_device_ip_suspend_phase2(adev); + amdgpu_fence_driver_hw_fini(adev); - /* evict remaining vram memory - * This second call to evict vram is to evict the gart page table - * using the CPU. + amdgpu_device_ip_suspend_phase2(adev); + /* This second call to evict device resources is to evict + * the gart page table using the CPU. 
*/ - amdgpu_bo_evict_vram(adev); - - if (suspend) { - pci_save_state(dev->pdev); - /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3hot); - } + amdgpu_device_evict_resources(adev); return 0; } @@ -3275,46 +3962,36 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) * amdgpu_device_resume - initiate device resume * * @dev: drm dev pointer - * @resume: resume state * @fbcon : notify the fbdev of resume * * Bring the hw back to operating state (all asics). * Returns 0 for success or an error on failure. * Called at driver resume. */ -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { - struct drm_connector *connector; - struct drm_connector_list_iter iter; - struct amdgpu_device *adev = dev->dev_private; - struct drm_crtc *crtc; + struct amdgpu_device *adev = drm_to_adev(dev); int r = 0; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (resume) { - pci_set_power_state(dev->pdev, PCI_D0); - pci_restore_state(dev->pdev); - r = pci_enable_device(dev->pdev); - if (r) - return r; - } + if (adev->in_s0ix) + amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); /* post card */ if (amdgpu_device_need_post(adev)) { - r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_device_asic_init(adev); if (r) - DRM_ERROR("amdgpu asic init failed\n"); + dev_err(adev->dev, "amdgpu asic init failed\n"); } r = amdgpu_device_ip_resume(adev); if (r) { - DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); + dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); return r; } - amdgpu_fence_driver_resume(adev); - + amdgpu_fence_driver_hw_init(adev); r = amdgpu_device_ip_late_init(adev); if (r) @@ -3323,50 +4000,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) queue_delayed_work(system_wq, &adev->delayed_init_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); - if (!amdgpu_device_has_dc_support(adev)) { - /* pin cursors */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); - r = amdgpu_bo_reserve(aobj, true); - if (r == 0) { - r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); - if (r != 0) - DRM_ERROR("Failed to pin cursor BO (%d)\n", r); - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); - amdgpu_bo_unreserve(aobj); - } - } - } + if (!adev->in_s0ix) { + r = amdgpu_amdkfd_resume(adev, adev->in_runpm); + if (r) + return r; } - r = amdgpu_amdkfd_resume(adev); - if (r) - return r; /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); - /* blat the mode back in */ - if (fbcon) { - if (!amdgpu_device_has_dc_support(adev)) { - /* pre DCE11 */ - drm_helper_resume_force_mode(dev); - - /* turn on display hw */ - drm_modeset_lock_all(dev); - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) - drm_helper_connector_dpms(connector, - DRM_MODE_DPMS_ON); - drm_connector_list_iter_end(&iter); - - drm_modeset_unlock_all(dev); - } + if (fbcon) amdgpu_fbdev_set_suspend(adev, 0); - } drm_kms_helper_poll_enable(dev); @@ -3393,6 +4037,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) #endif adev->in_suspend = false; + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) + DRM_WARN("smart shift 
update failed\n"); + return 0; } @@ -3424,7 +4071,7 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) adev->ip_blocks[i].status.hang = adev->ip_blocks[i].version->funcs->check_soft_reset(adev); if (adev->ip_blocks[i].status.hang) { - DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); + dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); asic_hang = true; } } @@ -3485,7 +4132,7 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { if (adev->ip_blocks[i].status.hang) { - DRM_INFO("Some block need full reset!\n"); + dev_info(adev->dev, "Some block need full reset!\n"); return true; } } @@ -3566,6 +4213,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) { struct dma_fence *fence = NULL, *next = NULL; struct amdgpu_bo *shadow; + struct amdgpu_bo_vm *vmbo; long r = 1, tmo; if (amdgpu_sriov_runtime(adev)) @@ -3573,14 +4221,14 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) else tmo = msecs_to_jiffies(100); - DRM_INFO("recover vram bo from shadow start\n"); + dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); - list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { - + list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { + shadow = &vmbo->bo; /* No need to recover an evicted BO */ - if (shadow->tbo.mem.mem_type != TTM_PL_TT || - shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || - shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) + if (shadow->tbo.resource->mem_type != TTM_PL_TT || + shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || + shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) continue; r = amdgpu_bo_restore_shadow(shadow, &next); @@ -3609,11 +4257,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dma_fence_put(fence); if (r < 0 || tmo <= 0) { - DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); + dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); return -EIO; } - DRM_INFO("recover vram bo from shadow done\n"); + dev_info(adev->dev, "recover vram bo from shadow done\n"); return 0; } @@ -3621,7 +4269,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) /** * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @from_hypervisor: request from hypervisor * * do VF FLR and reinitialize Asic @@ -3646,8 +4294,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) goto error; + amdgpu_virt_init_data_exchange(adev); /* we need recover gart prior to run SMC/CP/SDMA resume */ - amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); + amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); r = amdgpu_device_fw_loading(adev); if (r) @@ -3663,20 +4312,47 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_amdkfd_post_reset(adev); error: - amdgpu_virt_init_data_exchange(adev); - amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { amdgpu_inc_vram_lost(adev); r = amdgpu_device_recover_vram(adev); } + amdgpu_virt_release_full_gpu(adev, true); return r; } /** + * amdgpu_device_has_job_running - check if there is any job in mirror list + * + * @adev: amdgpu_device 
pointer + * + * check if there is any job in mirror list + */ +bool amdgpu_device_has_job_running(struct amdgpu_device *adev) +{ + int i; + struct drm_sched_job *job; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + spin_lock(&ring->sched.job_list_lock); + job = list_first_entry_or_null(&ring->sched.pending_list, + struct drm_sched_job, list); + spin_unlock(&ring->sched.job_list_lock); + if (job) + return true; + } + return false; +} + +/** * amdgpu_device_should_recover_gpu - check if we should try GPU recovery * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover * a hung GPU. @@ -3684,7 +4360,7 @@ error: bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) { if (!amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("Timeout, but no hardware hang detected.\n"); + dev_info(adev->dev, "Timeout, but no hardware hang detected.\n"); return false; } @@ -3709,6 +4385,17 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_RAVEN: + case CHIP_ARCTURUS: + case CHIP_RENOIR: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_VANGOGH: + case CHIP_ALDEBARAN: break; default: goto disabled; @@ -3718,17 +4405,65 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) return true; disabled: - DRM_INFO("GPU recovery disabled.\n"); + dev_info(adev->dev, "GPU recovery disabled.\n"); return false; } +int amdgpu_device_mode1_reset(struct amdgpu_device *adev) +{ + u32 i; + int ret = 0; + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + dev_info(adev->dev, "GPU mode1 reset\n"); + + /* disable BM */ + pci_clear_master(adev->pdev); + + amdgpu_device_cache_pci_state(adev->pdev); + + if (amdgpu_dpm_is_mode1_reset_supported(adev)) { + dev_info(adev->dev, "GPU smu mode1 reset\n"); + ret = amdgpu_dpm_mode1_reset(adev); + } else { + dev_info(adev->dev, "GPU psp mode1 reset\n"); + ret = psp_gpu_reset(adev); + } + + if (ret) + dev_err(adev->dev, "GPU mode1 reset failed\n"); -static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, - struct amdgpu_job *job, - bool *need_full_reset_arg) + amdgpu_device_load_pci_state(adev->pdev); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + u32 memsize = adev->nbio.funcs->get_memsize(adev); + + if (memsize != 0xffffffff) + break; + udelay(1); + } + + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + return ret; +} + +int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context) { - int i, r = 0; - bool need_full_reset = *need_full_reset_arg; + int i, j, r = 0; + struct amdgpu_job *job = NULL; + bool need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + + if (reset_context->reset_req_dev == adev) + job = reset_context->job; + + if (amdgpu_sriov_vf(adev)) { + /* stop the data exchange thread */ + amdgpu_virt_fini_data_exchange(adev); + } /* block all schedulers and reset given job's ring */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -3737,13 +4472,31 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!ring || !ring->sched.thread) continue; + /*clear job fence from fence drv to avoid force_completion + *leave NULL and vm 
flush fence in fence drv */ + for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) { + struct dma_fence *old, **ptr; + + ptr = &ring->fence_drv.fences[j]; + old = rcu_dereference_protected(*ptr, 1); + if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) { + RCU_INIT_POINTER(*ptr, NULL); + } + } /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } - if(job) + if (job && job->vm) drm_sched_increase_karma(&job->base); + r = amdgpu_reset_prepare_hwcontext(adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -ENOSYS) + r = 0; + else + return r; + /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ if (!amdgpu_sriov_vf(adev)) { @@ -3755,52 +4508,69 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, r = amdgpu_device_ip_soft_reset(adev); amdgpu_device_ip_post_soft_reset(adev); if (r || amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("soft reset failed, will fallback to full reset!\n"); + dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); need_full_reset = true; } } if (need_full_reset) r = amdgpu_device_ip_suspend(adev); - - *need_full_reset_arg = need_full_reset; + if (need_full_reset) + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + else + clear_bit(AMDGPU_NEED_FULL_RESET, + &reset_context->flags); } return r; } -static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, - struct list_head *device_list_handle, - bool *need_full_reset_arg) +int amdgpu_do_asic_reset(struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context) { struct amdgpu_device *tmp_adev = NULL; - bool need_full_reset = *need_full_reset_arg, vram_lost = false; + bool need_full_reset, skip_hw_reset, vram_lost = false; int r = 0; + /* Try reset handler method first */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + r = amdgpu_reset_perform_reset(tmp_adev, reset_context); + /* If reset handler not implemented, continue; otherwise return */ + if (r == -ENOSYS) + r = 0; + else + return r; + + /* Reset handler not implemented, use the default method */ + need_full_reset = + test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); + skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); + /* - * ASIC reset has to be done on all HGMI hive nodes ASAP + * ASIC reset has to be done on all XGMI hive nodes ASAP * to allow proper links negotiation in FW (within 1 sec) */ - if (need_full_reset) { - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (!skip_hw_reset && need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { /* For XGMI run all resets in parallel to speed up the process */ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { - if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work)) + tmp_adev->gmc.xgmi.pending_reset = false; + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) r = -EALREADY; } else r = amdgpu_asic_reset(tmp_adev); if (r) { - DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", - r, tmp_adev->ddev->unique); + dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", + r, adev_to_drm(tmp_adev)->unique); break; } } - /* For XGMI wait for all PSP resets to complete before proceed */ + /* For XGMI wait for all resets to complete before proceed */ if (!r) { - list_for_each_entry(tmp_adev, device_list_handle, - 
gmc.xgmi.head) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { flush_work(&tmp_adev->xgmi_reset_work); r = tmp_adev->asic_reset_res; @@ -3811,15 +4581,28 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, } } + if (!r && amdgpu_ras_intr_triggered()) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + if (tmp_adev->mmhub.ras_funcs && + tmp_adev->mmhub.ras_funcs->reset_ras_error_count) + tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev); + } + + amdgpu_ras_intr_cleared(); + } - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { if (need_full_reset) { /* post card */ - if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context)) - DRM_WARN("asic atom init failed!"); - - if (!r) { + r = amdgpu_device_asic_init(tmp_adev); + if (r) { + dev_warn(tmp_adev->dev, "asic atom init failed!"); + } else { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_amdkfd_resume_iommu(tmp_adev); + if (r) + goto out; + r = amdgpu_device_ip_resume_phase1(tmp_adev); if (r) goto out; @@ -3830,8 +4613,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, amdgpu_inc_vram_lost(tmp_adev); } - r = amdgpu_gtt_mgr_recover( - &tmp_adev->mman.bdev.man[TTM_PL_TT]); + r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT)); if (r) goto out; @@ -3852,27 +4634,48 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, */ amdgpu_register_gpu_instance(tmp_adev); + if (!reset_context->hive && + tmp_adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_add_device(tmp_adev); + r = amdgpu_device_ip_late_init(tmp_adev); if (r) goto out; - /* must succeed. */ - amdgpu_ras_resume(tmp_adev); + amdgpu_fbdev_set_suspend(tmp_adev, 0); + + /* + * The GPU enters bad state once faulty pages + * by ECC has reached the threshold, and ras + * recovery is scheduled next. So add one check + * here to break recovery if it indeed exceeds + * bad page threshold, and remind user to + * retire this GPU or setting one bigger + * bad_page_threshold value to fix this once + * probing driver again. + */ + if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { + /* must succeed. 
-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) +static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive) { - if (trylock) { - if (!mutex_trylock(&adev->lock_reset)) - return false; - } else - mutex_lock(&adev->lock_reset); + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) + return false; + + if (hive) { + down_write_nest_lock(&adev->reset_sem, &hive->hive_lock); + } else { + down_write(&adev->reset_sem); + } - atomic_inc(&adev->gpu_reset_counter); - adev->in_gpu_reset = 1; switch (amdgpu_asic_reset_method(adev)) { case AMD_RESET_METHOD_MODE1: adev->mp1_state = PP_MP1_STATE_SHUTDOWN; @@ -3919,14 +4727,180 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; - adev->in_gpu_reset = 0; - mutex_unlock(&adev->lock_reset); + atomic_set(&adev->in_gpu_reset, 0); + up_write(&adev->reset_sem); +} + +/* + * to lock a list of amdgpu devices in a hive safely; if this is not a hive + * with multiple nodes, it behaves like amdgpu_device_lock_adev. + * + * unlock won't require a roll back. + */ +static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) +{ + struct amdgpu_device *tmp_adev = NULL; + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!hive) { + dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes"); + return -ENODEV; + } + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + if (!amdgpu_device_lock_adev(tmp_adev, hive)) + goto roll_back; + } + } else if (!amdgpu_device_lock_adev(adev, hive)) + return -EAGAIN; + + return 0; +roll_back: + if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) { + /* + * if the lock iteration breaks in the middle of a hive, + * it may mean there is a race issue, or that a hive device + * locked up independently. we may or may not be in trouble, + * so try to roll back the locks and give out a warning. + */ + dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock"); + list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) { + amdgpu_device_unlock_adev(tmp_adev); + } + } + return -EAGAIN; +}
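The helper above is all-or-nothing: either every node in the hive ends up locked, or every lock taken so far is unwound in reverse order and -EAGAIN tells the caller to bail out. The down_write_nest_lock() variant exists to tell lockdep that each per-device reset_sem is taken under the same hive_lock, so the N acquisitions are not flagged as a self-deadlock. The skeleton of the pattern, with the iterators from the code above:

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		if (!amdgpu_device_lock_adev(tmp_adev, hive))
			goto roll_back;		/* stop at the first node we cannot claim */
	}
	return 0;
roll_back:
	/* unwind only the nodes locked before the failing one */
	list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head)
		amdgpu_device_unlock_adev(tmp_adev);
	return -EAGAIN;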
Rolling back to unlock"); + list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) { + amdgpu_device_unlock_adev(tmp_adev); + } + } + return -EAGAIN; +} + +static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (p) { + pm_runtime_enable(&(p->dev)); + pm_runtime_resume(&(p->dev)); + } +} + +static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) +{ + enum amd_reset_method reset_method; + struct pci_dev *p = NULL; + u64 expires; + + /* + * For now, only BACO and mode1 reset are confirmed + * to suffer the audio issue without proper suspended. + */ + reset_method = amdgpu_asic_reset_method(adev); + if ((reset_method != AMD_RESET_METHOD_BACO) && + (reset_method != AMD_RESET_METHOD_MODE1)) + return -EINVAL; + + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (!p) + return -ENODEV; + + expires = pm_runtime_autosuspend_expiration(&(p->dev)); + if (!expires) + /* + * If we cannot get the audio device autosuspend delay, + * a fixed 4S interval will be used. Considering 3S is + * the audio controller default autosuspend delay setting. + * 4S used here is guaranteed to cover that. + */ + expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL; + + while (!pm_runtime_status_suspended(&(p->dev))) { + if (!pm_runtime_suspend(&(p->dev))) + break; + + if (expires < ktime_get_mono_fast_ns()) { + dev_warn(adev->dev, "failed to suspend display audio\n"); + /* TODO: abort the succeeding gpu reset? */ + return -ETIMEDOUT; + } + } + + pm_runtime_disable(&(p->dev)); + + return 0; +} + +static void amdgpu_device_recheck_guilty_jobs( + struct amdgpu_device *adev, struct list_head *device_list_handle, + struct amdgpu_reset_context *reset_context) +{ + int i, r = 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + int ret = 0; + struct drm_sched_job *s_job; + + if (!ring || !ring->sched.thread) + continue; + + s_job = list_first_entry_or_null(&ring->sched.pending_list, + struct drm_sched_job, list); + if (s_job == NULL) + continue; + + /* clear job's guilty and depend the folowing step to decide the real one */ + drm_sched_reset_karma(s_job); + drm_sched_resubmit_jobs_ext(&ring->sched, 1); + + ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout); + if (ret == 0) { /* timeout */ + DRM_ERROR("Found the real bad job! 
ring:%s, job_id:%llx\n", + ring->sched.name, s_job->id); + + /* set guilty */ + drm_sched_increase_karma(s_job); +retry: + /* do hw reset */ + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_fini_data_exchange(adev); + r = amdgpu_device_reset_sriov(adev, false); + if (r) + adev->asic_reset_res = r; + } else { + clear_bit(AMDGPU_SKIP_HW_RESET, + &reset_context->flags); + r = amdgpu_do_asic_reset(device_list_handle, + reset_context); + if (r && r == -EAGAIN) + goto retry; + } + + /* + * add reset counter so that the following + * resubmitted job could flush vmid + */ + atomic_inc(&adev->gpu_reset_counter); + continue; + } + + /* got the hw fence, signal finished fence */ + atomic_dec(ring->sched.score); + dma_fence_get(&s_job->s_fence->finished); + dma_fence_signal(&s_job->s_fence->finished); + dma_fence_put(&s_job->s_fence->finished); + + /* remove node from list and free the job */ + spin_lock(&ring->sched.job_list_lock); + list_del_init(&s_job->list); + spin_unlock(&ring->sched.job_list_lock); + ring->sched.ops->free_job(s_job); + } } /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * - * @adev: amdgpu device pointer + * @adev: amdgpu_device pointer * @job: which job trigger hang * * Attempt to reset the GPU if it has hung (all asics). @@ -3938,32 +4912,35 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) { struct list_head device_list, *device_list_handle = NULL; - bool need_full_reset, job_signaled; + bool job_signaled = false; struct amdgpu_hive_info *hive = NULL; struct amdgpu_device *tmp_adev = NULL; int i, r = 0; - bool in_ras_intr = amdgpu_ras_intr_triggered(); + bool need_emergency_restart = false; + bool audio_suspended = false; + int tmp_vram_lost_counter; + struct amdgpu_reset_context reset_context; + + memset(&reset_context, 0, sizeof(reset_context)); + + /* + * Special case: RAS triggered and full reset isn't supported + */ + need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); /* * Flush RAM to disk so that after reboot * the user can read log and see why the system rebooted. */ - if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) { - + if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { DRM_WARN("Emergency reboot."); ksys_sync_helper(); emergency_restart(); } - need_full_reset = job_signaled = false; - INIT_LIST_HEAD(&device_list); - - dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset"); - - cancel_delayed_work_sync(&adev->delayed_init_work); - - hive = amdgpu_get_xgmi_hive(adev, false); + dev_info(adev->dev, "GPU %s begin!\n", + need_emergency_restart ? "jobs stop":"reset"); /* * Here we trylock to avoid chain of resets executing from @@ -3972,52 +4949,79 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * We always reset all schedulers for device and all devices for XGMI * hive so that should take care of them too. */ - - if (hive && !mutex_trylock(&hive->reset_lock)) { - DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", - job ? job->base.id : -1, hive->hive_id); - return 0; + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) { + DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", + job ? 
- /* Start with adev pre asic reset first for soft reset check.*/ - if (!amdgpu_device_lock_adev(adev, !hive)) { - DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", - job ? job->base.id : -1); - return 0; - } + reset_context.method = AMD_RESET_METHOD_NONE; + reset_context.reset_req_dev = adev; + reset_context.job = job; + reset_context.hive = hive; + clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - /* Block kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_pre_reset(adev); + /* + * lock the device before we try to operate on the linked list. + * if we didn't get the device lock, don't touch the linked list + * since others may be iterating it. + */ + r = amdgpu_device_lock_hive_adev(adev, hive); + if (r) { + dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress", + job ? job->base.id : -1); - /* Build list of devices to reset */ - if (adev->gmc.xgmi.num_physical_nodes > 1) { - if (!hive) { - /*unlock kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_post_reset(adev); - amdgpu_device_unlock_adev(adev); - return -ENODEV; - } + /* even though we skipped this reset, we still need to mark the job as guilty */ + if (job && job->vm) + drm_sched_increase_karma(&job->base); + goto skip_recovery; + } - /* - * In case we are in XGMI hive mode device reset is done for all the - * nodes in the hive to retrain all XGMI links and hence the reset - * sequence is executed in loop on all nodes. - */ - device_list_handle = &hive->device_list; + /* + * Build list of devices to reset. + * In case we are in XGMI hive mode, re-sort the device list + * to put adev in the 1st position. + */ + INIT_LIST_HEAD(&device_list); + if (adev->gmc.xgmi.num_physical_nodes > 1) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + list_add_tail(&tmp_adev->reset_list, &device_list); + if (!list_is_first(&adev->reset_list, &device_list)) + list_rotate_to_front(&adev->reset_list, &device_list); + device_list_handle = &device_list; } else { - list_add_tail(&adev->gmc.xgmi.head, &device_list); + list_add_tail(&adev->reset_list, &device_list); device_list_handle = &device_list; }
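Each amdgpu_device now carries a dedicated reset_list node in addition to gmc.xgmi.head, so a device can sit on the long-lived hive list and on a transient per-recovery list at the same time; rotating adev to the front makes the device that observed the hang the first one every per-device loop visits. In miniature, with the field names from the code above:

	INIT_LIST_HEAD(&device_list);
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		list_add_tail(&tmp_adev->reset_list, &device_list);	/* second membership */
	if (!list_is_first(&adev->reset_list, &device_list))
		list_rotate_to_front(&adev->reset_list, &device_list);	/* adev goes first */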
/* block all schedulers and reset given job's ring */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - if (tmp_adev != adev) { - amdgpu_device_lock_adev(tmp_adev, false); - if (!amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_pre_reset(tmp_adev); - } + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* + * Try to put the audio codec into suspend state + * before the gpu reset starts. + * + * The power domain of the graphics device is shared + * with the AZ power domain. Without this, we may + * change the audio hardware from behind the audio + * driver's back and trigger audio codec errors. + */ + if (!amdgpu_device_suspend_display_audio(tmp_adev)) + audio_suspended = true; + + amdgpu_ras_set_error_query_ready(tmp_adev, false); + + cancel_delayed_work_sync(&tmp_adev->delayed_init_work); + + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reset as untracked first @@ -4025,8 +5029,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); + amdgpu_fbdev_set_suspend(tmp_adev, 1); + /* disable ras on ALL IPs */ - if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!need_emergency_restart && + amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -4037,13 +5044,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, drm_sched_stop(&ring->sched, job ? &job->base : NULL); - if (in_ras_intr) + if (need_emergency_restart) amdgpu_job_stop_all_jobs_on_sched(&ring->sched); } + atomic_inc(&tmp_adev->gpu_reset_counter); } - - if (in_ras_intr) + if (need_emergency_restart) goto skip_sched_resume; /* @@ -4053,41 +5060,24 @@ * job->base holds a reference to parent fence */ if (job && job->base.s_fence->parent && - dma_fence_is_signaled(job->base.s_fence->parent)) + dma_fence_is_signaled(job->base.s_fence->parent)) { job_signaled = true; - - if (job_signaled) { dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; } - - /* Guilty job will be freed after this*/ - r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset); - if (r) { - /*TODO Should we stop ?*/ - DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", - r, adev->ddev->unique); - adev->asic_reset_res = r; - } - retry: /* Rest of adevs pre asic reset from XGMI hive. */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - - if (tmp_adev == adev) - continue; - - r = amdgpu_device_pre_asic_reset(tmp_adev, - NULL, - &need_full_reset); + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context); /*TODO Should we stop ?*/ if (r) { - DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", - r, tmp_adev->ddev->unique); + dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", + r, adev_to_drm(tmp_adev)->unique); tmp_adev->asic_reset_res = r; } } + tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ /* TODO Implement XGMI hive reset logic for SRIOV */ if (amdgpu_sriov_vf(adev)) { @@ -4095,7 +5085,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (r) adev->asic_reset_res = r; } else { - r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); + r = amdgpu_do_asic_reset(device_list_handle, &reset_context); if (r && r == -EAGAIN) goto retry; }
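Before the actual reset, tmp_vram_lost_counter snapshots adev->vram_lost_counter, and amdgpu_do_asic_reset() bumps that counter (via amdgpu_inc_vram_lost()) whenever VRAM contents were lost. Comparing the two afterwards is what gates the guilty-job recheck in the next hunk: resubmitting jobs only proves anything if their VRAM data survived. The generation-counter pattern on its own:

	int before = atomic_read(&adev->vram_lost_counter);
	/* ... reset runs; amdgpu_inc_vram_lost() may bump the counter ... */
	if (atomic_read(&adev->vram_lost_counter) == before)
		/* VRAM generation unchanged: buffers intact, recheck is meaningful */
		amdgpu_device_recheck_guilty_jobs(tmp_adev, device_list_handle, &reset_context);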
@@ -4103,7 +5093,19 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ skip_hw_reset: /* Post ASIC reset for all devs. */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + + /* + * Sometimes a later bad compute job can block a good gfx job because + * the gfx and compute rings share internal GC HW mutually. We add an + * additional guilty-job recheck step to find the real guilty job: it + * synchronously resubmits the first pending job and waits for it to be + * signaled. If that times out, we identify it as the real guilty job. + */ + if (amdgpu_gpu_recovery == 2 && + !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter))) + amdgpu_device_recheck_guilty_jobs( + tmp_adev, device_list_handle, &reset_context); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -4119,7 +5121,7 @@ skip_hw_reset: } if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { - drm_helper_resume_force_mode(tmp_adev->ddev); + drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } tmp_adev->asic_reset_res = 0; @@ -4130,21 +5132,36 @@ skip_hw_reset: amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); + if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) + DRM_WARN("smart shift update failed\n"); } } skip_sched_resume: - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - /*unlock kfd: SRIOV would do it separately */ - if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev)) + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + /* unlock kfd: SRIOV would do it separately */ + if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) amdgpu_amdkfd_post_reset(tmp_adev); + + /* kfd_post_reset will do nothing if the kfd device is not initialized; + * we need to bring up kfd here if it wasn't initialized before + */ + if (!adev->kfd.init_complete) + amdgpu_amdkfd_device_init(adev); + + if (audio_suspended) + amdgpu_device_resume_display_audio(tmp_adev); amdgpu_device_unlock_adev(tmp_adev); } - if (hive) - mutex_unlock(&hive->reset_lock); +skip_recovery: + if (hive) { + atomic_set(&hive->in_reset, 0); + mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); + } - if (r) + if (r && r != -EAGAIN) dev_info(adev->dev, "GPU reset end with ret = %d\n", r); return r; } @@ -4194,7 +5211,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); } else { - if (speed_cap == PCIE_SPEED_16_0GT) + if (speed_cap == PCIE_SPEED_32_0GT) + adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5); + else if (speed_cap == PCIE_SPEED_16_0GT) adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 | @@ -4214,7 +5237,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); } else { - if (platform_speed_cap == PCIE_SPEED_16_0GT) + if (platform_speed_cap == PCIE_SPEED_32_0GT) + adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5); + else if (platform_speed_cap == PCIE_SPEED_16_0GT) adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | @@ -4285,3 +5314,313 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) } } +int amdgpu_device_baco_enter(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if 
(!amdgpu_device_supports_baco(adev_to_drm(adev))) + return -ENOTSUPP; + + if (ras && adev->ras_enabled && + adev->nbio.funcs->enable_doorbell_interrupt) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); + + return amdgpu_dpm_baco_enter(adev); +} + +int amdgpu_device_baco_exit(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; + + if (!amdgpu_device_supports_baco(adev_to_drm(adev))) + return -ENOTSUPP; + + ret = amdgpu_dpm_baco_exit(adev); + if (ret) + return ret; + + if (ras && adev->ras_enabled && + adev->nbio.funcs->enable_doorbell_interrupt) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); + + if (amdgpu_passthrough(adev) && + adev->nbio.funcs->clear_doorbell_interrupt) + adev->nbio.funcs->clear_doorbell_interrupt(adev); + + return 0; +} + +static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + cancel_delayed_work_sync(&ring->sched.work_tdr); + } +} + +/** + * amdgpu_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. + */ +pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); + int i; + + DRM_INFO("PCI error: detected callback, state(%d)!!\n", state); + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + DRM_WARN("No support for XGMI hive yet..."); + return PCI_ERS_RESULT_DISCONNECT; + } + + adev->pci_channel_state = state; + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + /* Fatal error, prepare for slot reset */ + case pci_channel_io_frozen: + /* + * Cancel and wait for all TDRs in progress if failing to + * set adev->in_gpu_reset in amdgpu_device_lock_adev + * + * Locking adev->reset_sem will prevent any external access + * to GPU during PCI error recovery + */ + while (!amdgpu_device_lock_adev(adev, NULL)) + amdgpu_cancel_all_tdr(adev); + + /* + * Block any work scheduling as we do for regular GPU reset + * for the duration of the recovery + */ + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + drm_sched_stop(&ring->sched, NULL); + } + atomic_inc(&adev->gpu_reset_counter); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent error, prepare for device removal */ + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev) +{ + + DRM_INFO("PCI error: mmio enabled callback!!\n"); + + /* TODO - dump whatever for debugging purposes */ + + /* This called only if amdgpu_pci_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still + * works, no need to reset slot. + */ + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * amdgpu_pci_slot_reset - Called when PCI slot has been reset. 
+ * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); + int r, i; + struct amdgpu_reset_context reset_context; + u32 memsize; + struct list_head device_list; + + DRM_INFO("PCI error: slot reset callback!!\n"); + + memset(&reset_context, 0, sizeof(reset_context)); + + INIT_LIST_HEAD(&device_list); + list_add_tail(&adev->reset_list, &device_list); + + /* wait for asic to come out of reset */ + msleep(500); + + /* Restore PCI confspace */ + amdgpu_device_load_pci_state(pdev); + + /* confirm ASIC came out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + memsize = amdgpu_asic_get_config_memsize(adev); + + if (memsize != 0xffffffff) + break; + udelay(1); + } + if (memsize == 0xffffffff) { + r = -ETIME; + goto out; + } + + reset_context.method = AMD_RESET_METHOD_NONE; + reset_context.reset_req_dev = adev; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + + adev->no_hw_access = true; + r = amdgpu_device_pre_asic_reset(adev, &reset_context); + adev->no_hw_access = false; + if (r) + goto out; + + r = amdgpu_do_asic_reset(&device_list, &reset_context); + +out: + if (!r) { + if (amdgpu_device_cache_pci_state(adev->pdev)) + pci_restore_state(adev->pdev); + + DRM_INFO("PCIe error recovery succeeded\n"); + } else { + DRM_ERROR("PCIe error recovery failed, err:%d", r); + amdgpu_device_unlock_adev(adev); + } + + return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +/** + * amdgpu_pci_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that its + * OK to resume normal operation. 
+ */ +void amdgpu_pci_resume(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); + int i; + + + DRM_INFO("PCI error: resume callback!!\n"); + + /* Only continue execution for the case of pci_channel_io_frozen */ + if (adev->pci_channel_state != pci_channel_io_frozen) + return; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + + drm_sched_resubmit_jobs(&ring->sched); + drm_sched_start(&ring->sched, true); + } + + amdgpu_device_unlock_adev(adev); +} + +bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); + int r; + + r = pci_save_state(pdev); + if (!r) { + kfree(adev->pci_state); + + adev->pci_state = pci_store_saved_state(pdev); + + if (!adev->pci_state) { + DRM_ERROR("Failed to store PCI saved state"); + return false; + } + } else { + DRM_WARN("Failed to save PCI state, err:%d\n", r); + return false; + } + + return true; +} + +bool amdgpu_device_load_pci_state(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); + int r; + + if (!adev->pci_state) + return false; + + r = pci_load_saved_state(pdev, adev->pci_state); + + if (!r) { + pci_restore_state(pdev); + } else { + DRM_WARN("Failed to load PCI state, err:%d\n", r); + return false; + } + + return true; +} + +void amdgpu_device_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) + return; +#endif + if (adev->gmc.xgmi.connected_to_cpu) + return; + + if (ring && ring->funcs->emit_hdp_flush) + amdgpu_ring_emit_hdp_flush(ring); + else + amdgpu_asic_flush_hdp(adev, ring); +} + +void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) + return; +#endif + if (adev->gmc.xgmi.connected_to_cpu) + return; + + amdgpu_asic_invalidate_hdp(adev, ring); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h new file mode 100644 index 000000000000..6b25837955c4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h @@ -0,0 +1,63 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_DF_H__ +#define __AMDGPU_DF_H__ + +struct amdgpu_df_hash_status { + bool hash_64k; + bool hash_2m; + bool hash_1g; +}; + +struct amdgpu_df_funcs { + void (*sw_init)(struct amdgpu_device *adev); + void (*sw_fini)(struct amdgpu_device *adev); + void (*enable_broadcast_mode)(struct amdgpu_device *adev, + bool enable); + u32 (*get_fb_channel_number)(struct amdgpu_device *adev); + u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); + void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, + bool enable); + int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, + int counter_idx, int is_add); + int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, + int counter_idx, int is_remove); + void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, + int counter_idx, uint64_t *count); + uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); + void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, + uint32_t ficadl_val, uint32_t ficadh_val); + bool (*query_ras_poison_mode)(struct amdgpu_device *adev); +}; + +struct amdgpu_df { + struct amdgpu_df_hash_status hash_status; + const struct amdgpu_df_funcs *funcs; +}; + +#endif /* __AMDGPU_DF_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index f95092741c38..d7c8d9e3c203 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -21,20 +21,60 @@ * */ +#include <linux/firmware.h> + #include "amdgpu.h" #include "amdgpu_discovery.h" -#include "soc15_common.h" #include "soc15_hw_ip.h" -#include "nbio/nbio_2_3_offset.h" #include "discovery.h" +#include "soc15.h" +#include "gfx_v9_0.h" +#include "gmc_v9_0.h" +#include "df_v1_7.h" +#include "df_v3_6.h" +#include "nbio_v6_1.h" +#include "nbio_v7_0.h" +#include "nbio_v7_4.h" +#include "hdp_v4_0.h" +#include "vega10_ih.h" +#include "vega20_ih.h" +#include "sdma_v4_0.h" +#include "uvd_v7_0.h" +#include "vce_v4_0.h" +#include "vcn_v1_0.h" +#include "vcn_v2_5.h" +#include "jpeg_v2_5.h" +#include "smuio_v9_0.h" +#include "gmc_v10_0.h" +#include "gfxhub_v2_0.h" +#include "mmhub_v2_0.h" +#include "nbio_v2_3.h" +#include "nbio_v7_2.h" +#include "hdp_v5_0.h" +#include "nv.h" +#include "navi10_ih.h" +#include "gfx_v10_0.h" +#include "sdma_v5_0.h" +#include "sdma_v5_2.h" +#include "vcn_v2_0.h" +#include "jpeg_v2_0.h" +#include "vcn_v3_0.h" +#include "jpeg_v3_0.h" +#include "amdgpu_vkms.h" +#include "mes_v10_1.h" +#include "smuio_v11_0.h" +#include "smuio_v11_0_6.h" +#include "smuio_v13_0.h" + +MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); + #define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 -#define HW_ID_MAX 300 -const char *hw_id_names[HW_ID_MAX] = { +static const char *hw_id_names[HW_ID_MAX] = { [MP1_HWID] = "MP1", [MP2_HWID] = "MP2", [THM_HWID] = "THM", @@ -68,6 +108,8 @@ const char *hw_id_names[HW_ID_MAX] = { [HDP_HWID] = "HDP", [SDMA0_HWID] = "SDMA0", [SDMA1_HWID] = "SDMA1", + [SDMA2_HWID] = "SDMA2", + [SDMA3_HWID] = "SDMA3", [ISP_HWID] = "ISP", [DBGU_IO_HWID] = "DBGU_IO", [DF_HWID] = "DF", @@ -130,14 +172,18 @@ static int hw_id_map[MAX_HWIP] = { [NBIF_HWIP] = NBIF_HWID, [THM_HWIP] = THM_HWID, [CLK_HWIP] = CLKA_HWID, + [UMC_HWIP] = UMC_HWID, + [XGMI_HWIP] = XGMI_HWID, + [DCI_HWIP] = DCI_HWID, }; static 
int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; + uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; - amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false); + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, + adev->mman.discovery_tmr_size, false); return 0; } @@ -158,28 +204,41 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); } -int amdgpu_discovery_init(struct amdgpu_device *adev) +static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct gpu_info_header *ghdr; + const struct firmware *fw; uint16_t offset; uint16_t size; uint16_t checksum; int r; - adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); - if (!adev->discovery) + adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; + adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); + if (!adev->mman.discovery_bin) return -ENOMEM; - r = amdgpu_discovery_read_binary(adev, adev->discovery); - if (r) { - DRM_ERROR("failed to read ip discovery binary\n"); - goto out; + if (amdgpu_discovery == 2) { + r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev); + if (r) + goto get_from_vram; + dev_info(adev->dev, "Using IP discovery from file\n"); + memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data, + adev->mman.discovery_tmr_size); + release_firmware(fw); + } else { +get_from_vram: + r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); + if (r) { + DRM_ERROR("failed to read ip discovery binary\n"); + goto out; + } } - bhdr = (struct binary_header *)adev->discovery; + bhdr = (struct binary_header *)adev->mman.discovery_bin; if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) { DRM_ERROR("invalid ip discovery binary signature\n"); @@ -192,7 +251,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) size = bhdr->binary_size - offset; checksum = bhdr->binary_checksum; - if (!amdgpu_discovery_verify_checksum(adev->discovery + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { DRM_ERROR("invalid ip discovery binary checksum\n"); r = -EINVAL; @@ -202,7 +261,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) info = &bhdr->table_list[IP_DISCOVERY]; offset = le16_to_cpu(info->offset); checksum = le16_to_cpu(info->checksum); - ihdr = (struct ip_discovery_header *)(adev->discovery + offset); + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { DRM_ERROR("invalid ip discovery data table signature\n"); @@ -210,7 +269,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->discovery + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, ihdr->size, checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; @@ -220,9 +279,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) info = &bhdr->table_list[GC]; offset = le16_to_cpu(info->offset); checksum = le16_to_cpu(info->checksum); - ghdr = (struct gpu_info_header *)(adev->discovery + offset); + ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); - if 
(!amdgpu_discovery_verify_checksum(adev->discovery + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, ghdr->size, checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; @@ -232,16 +291,32 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) return 0; out: - kfree(adev->discovery); - adev->discovery = NULL; + kfree(adev->mman.discovery_bin); + adev->mman.discovery_bin = NULL; return r; } void amdgpu_discovery_fini(struct amdgpu_device *adev) { - kfree(adev->discovery); - adev->discovery = NULL; + kfree(adev->mman.discovery_bin); + adev->mman.discovery_bin = NULL; +} + +static int amdgpu_discovery_validate_ip(const struct ip *ip) +{ + if (ip->number_instance >= HWIP_MAX_INSTANCE) { + DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", + ip->number_instance); + return -EINVAL; + } + if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { + DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", + le16_to_cpu(ip->hw_id)); + return -EINVAL; + } + + return 0; } int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) @@ -257,14 +332,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) uint8_t num_base_address; int hw_ip; int i, j, k; + int r; - if (!adev->discovery) { - DRM_ERROR("ip discovery uninitialized\n"); - return -EINVAL; + r = amdgpu_discovery_init(adev); + if (r) { + DRM_ERROR("amdgpu_discovery_init failed\n"); + return r; } - bhdr = (struct binary_header *)adev->discovery; - ihdr = (struct ip_discovery_header *)(adev->discovery + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); @@ -272,7 +349,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->discovery + die_offset); + dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -286,7 +363,11 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->discovery + ip_offset); + ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); + + if (amdgpu_discovery_validate_ip(ip)) + goto next_ip; + num_base_address = ip->num_base_address; DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", @@ -296,6 +377,14 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) ip->major, ip->minor, ip->revision); + if (le16_to_cpu(ip->hw_id) == VCN_HWID) + adev->vcn.num_vcn_inst++; + if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || + le16_to_cpu(ip->hw_id) == SDMA1_HWID || + le16_to_cpu(ip->hw_id) == SDMA2_HWID || + le16_to_cpu(ip->hw_id) == SDMA3_HWID) + adev->sdma.num_instances++; + for (k = 0; k < num_base_address; k++) { /* * convert the endianness of base addresses in place, @@ -307,14 +396,25 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) { - DRM_INFO("set register base offset for %s\n", + DRM_DEBUG("set register base offset for %s\n", hw_id_names[le16_to_cpu(ip->hw_id)]); adev->reg_offset[hw_ip][ip->number_instance] = ip->base_address; + /* Instance support is somewhat inconsistent. + * SDMA is a good example. 
Sienna cichlid has 4 total + * SDMA instances, each enumerated separately (HWIDs + * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, + * but they are enumerated as multiple instances of the + * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another + * example. On most chips there are multiple instances + * with the same HWID. + */ + adev->ip_versions[hw_ip][ip->number_instance] = + IP_VERSION(ip->major, ip->minor, ip->revision); } - } +next_ip: ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1); } } @@ -322,7 +422,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) return 0; } -int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, +int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision) { struct binary_header *bhdr; @@ -335,26 +435,26 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, uint16_t num_ips; int i, j; - if (!adev->discovery) { + if (!adev->mman.discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->discovery; - ihdr = (struct ip_discovery_header *)(adev->discovery + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->discovery + die_offset); + dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->discovery + ip_offset); + ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); - if (le16_to_cpu(ip->hw_id) == hw_id) { + if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { if (major) *major = ip->major; if (minor) @@ -370,18 +470,72 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, return -EINVAL; } + +int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, + int *major, int *minor, int *revision) +{ + return amdgpu_discovery_get_ip_version(adev, VCN_HWID, + vcn_instance, major, minor, revision); +} + +void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) +{ + struct binary_header *bhdr; + struct harvest_table *harvest_info; + int i, vcn_harvest_count = 0; + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + + le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); + + for (i = 0; i < 32; i++) { + if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + break; + + switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + case VCN_HWID: + vcn_harvest_count++; + if (harvest_info->list[i].number_instance == 0) + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; + else + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + break; + case DMU_HWID: + adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; + break; + default: + break; + } + } + /* some IP discovery tables on Navy Flounder don't have this set correctly */ + if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + if (vcn_harvest_count == adev->vcn.num_vcn_inst) { + adev->harvest_ip_mask |= 
AMD_HARVEST_IP_VCN_MASK; + adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; + } + if ((adev->pdev->device == 0x731E && + (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) || + (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) || + (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) { + adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; + adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; + } +} + int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) { struct binary_header *bhdr; struct gc_info_v1_0 *gc_info; - if (!adev->discovery) { + if (!adev->mman.discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->discovery; - gc_info = (struct gc_info_v1_0 *)(adev->discovery + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[GC].offset)); adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se); @@ -405,3 +559,753 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) return 0; } + +static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) +{ + /* what IP to use for this? */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) +{ + /* use GC or MMHUB IP version */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[OSSSYS_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 3, 0): + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + break; + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 1): + case IP_VERSION(4, 4, 0): + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 1): + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + break; + 
default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + break; + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + break; + case IP_VERSION(11, 0, 8): + amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); + break; + case IP_VERSION(11, 0, 3): + case IP_VERSION(12, 0, 1): + amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 3): + amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(11, 0, 2): + if (adev->asic_type == CHIP_ARCTURUS) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + else + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 8): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + break; + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): + amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 3): + amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) +{ + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) { + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); +#if defined(CONFIG_DRM_AMD_DC) + } else if (adev->ip_versions[DCE_HWIP][0]) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + amdgpu_device_ip_block_add(adev, &dm_ip_block); + break; + default: + return -EINVAL; + } + } else if (adev->ip_versions[DCI_HWIP][0]) { + switch (adev->ip_versions[DCI_HWIP][0]) { + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): + case IP_VERSION(12, 1, 0): + amdgpu_device_ip_block_add(adev, &dm_ip_block); + break; + default: + return -EINVAL; + } +#endif + } + return 0; +} + +static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case 
IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 2): + case IP_VERSION(4, 4, 0): + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): + amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + break; + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 4): + case IP_VERSION(5, 2, 5): + case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 1): + amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); + break; + default: + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) +{ + if (adev->ip_versions[VCE_HWIP][0]) { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 2, 0): + /* UVD is not supported on vega20 SR-IOV */ + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + break; + default: + return -EINVAL; + } + switch (adev->ip_versions[VCE_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 1, 0): + /* VCE is not supported on vega20 SR-IOV */ + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + break; + default: + return -EINVAL; + } + } else { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); + break; + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 2, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + break; + case IP_VERSION(2, 0, 3): + break; + case IP_VERSION(2, 5, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); + break; + case IP_VERSION(2, 6, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); + break; + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 1, 1): + case IP_VERSION(3, 0, 2): + amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); + break; + case IP_VERSION(3, 0, 33): + amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); + break; + default: + return -EINVAL; + } + } + return 0; +} + +static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 
3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); + break; + default: + break;; + } + return 0; +} + +int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) +{ + int r; + + switch (adev->asic_type) { + case CHIP_VEGA10: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); + break; + case CHIP_VEGA12: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); + break; + case CHIP_RAVEN: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 1; + adev->vcn.num_vcn_inst = 1; + if (adev->apu_flags & AMD_APU_IS_RAVEN2) { + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); + adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); + } else { + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 
0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); + adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); + } + break; + case CHIP_VEGA20: + vega20_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); + break; + case CHIP_ARCTURUS: + arct_reg_base_init(adev); + adev->sdma.num_instances = 8; + adev->vcn.num_vcn_inst = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); + break; + case CHIP_ALDEBARAN: + aldebaran_reg_base_init(adev); + adev->sdma.num_instances = 5; + adev->vcn.num_vcn_inst = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); + 
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); + adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); + break; + default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) + return -EINVAL; + + amdgpu_discovery_harvest_ip(adev); + + if (!adev->mman.discovery_bin) { + DRM_ERROR("ip discovery uninitialized\n"); + return -EINVAL; + } + break; + } + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + adev->family = AMDGPU_FAMILY_AI; + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + adev->family = AMDGPU_FAMILY_RV; + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + adev->family = AMDGPU_FAMILY_NV; + break; + case IP_VERSION(10, 3, 1): + adev->family = AMDGPU_FAMILY_VGH; + break; + case IP_VERSION(10, 3, 3): + adev->family = AMDGPU_FAMILY_YC; + break; + default: + return -EINVAL; + } + + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) + adev->gmc.xgmi.supported = true; + + /* set NBIO version */ + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 2, 0): + adev->nbio.funcs = &nbio_v6_1_funcs; + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; + break; + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + case IP_VERSION(2, 5, 0): + adev->nbio.funcs = &nbio_v7_0_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; + break; + case IP_VERSION(7, 4, 0): + case IP_VERSION(7, 4, 1): + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; + break; + case IP_VERSION(7, 4, 4): + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald; + break; + case IP_VERSION(7, 2, 0): + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): + adev->nbio.funcs = &nbio_v7_2_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; + break; + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 3, 1): + case IP_VERSION(2, 3, 2): + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; + break; + case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): + case IP_VERSION(3, 3, 2): + case IP_VERSION(3, 3, 3): + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc; + break; + default: + break; + } + + switch 
(adev->ip_versions[HDP_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 1): + case IP_VERSION(4, 4, 0): + adev->hdp.funcs = &hdp_v4_0_funcs; + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 0, 4): + case IP_VERSION(5, 2, 0): + adev->hdp.funcs = &hdp_v5_0_funcs; + break; + default: + break; + } + + switch (adev->ip_versions[DF_HWIP][0]) { + case IP_VERSION(3, 6, 0): + case IP_VERSION(3, 6, 1): + case IP_VERSION(3, 6, 2): + adev->df.funcs = &df_v3_6_funcs; + break; + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 5, 2): + adev->df.funcs = &df_v1_7_funcs; + break; + default: + break; + } + + switch (adev->ip_versions[SMUIO_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(9, 0, 1): + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(10, 0, 2): + adev->smuio.funcs = &smuio_v9_0_funcs; + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 8): + adev->smuio.funcs = &smuio_v11_0_funcs; + break; + case IP_VERSION(11, 0, 6): + case IP_VERSION(11, 0, 10): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(13, 0, 1): + adev->smuio.funcs = &smuio_v11_0_6_funcs; + break; + case IP_VERSION(13, 0, 2): + adev->smuio.funcs = &smuio_v13_0_funcs; + break; + default: + break; + } + + r = amdgpu_discovery_set_common_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_gmc_ip_blocks(adev); + if (r) + return r; + + /* For SR-IOV, PSP needs to be initialized before IH */ + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_discovery_set_psp_ip_blocks(adev); + if (r) + return r; + r = amdgpu_discovery_set_ih_ip_blocks(adev); + if (r) + return r; + } else { + r = amdgpu_discovery_set_ih_ip_blocks(adev); + if (r) + return r; + + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + r = amdgpu_discovery_set_psp_ip_blocks(adev); + if (r) + return r; + } + } + + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + r = amdgpu_discovery_set_smu_ip_blocks(adev); + if (r) + return r; + } + + r = amdgpu_discovery_set_display_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_gc_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_sdma_ip_blocks(adev); + if (r) + return r; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && + !amdgpu_sriov_vf(adev)) { + r = amdgpu_discovery_set_smu_ip_blocks(adev); + if (r) + return r; + } + + r = amdgpu_discovery_set_mm_ip_blocks(adev); + if (r) + return r; + + if (adev->enable_mes) { + r = amdgpu_discovery_set_mes_ip_blocks(adev); + if (r) + return r; + } + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index ba78e15d9b05..0ea029e3b850 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,13 +24,18 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ -#define DISCOVERY_TMR_SIZE (64 << 10) +#define DISCOVERY_TMR_SIZE (4 << 10) +#define DISCOVERY_TMR_OFFSET (64 << 10) -int amdgpu_discovery_init(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device 
*adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); -int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, +void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev); +int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision); + +int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, + int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); +int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 3cadb0b76f22..dc50c05f23fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -38,6 +38,7 @@ #include <drm/drm_edid.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> static void amdgpu_display_flip_callback(struct dma_fence *f, @@ -93,13 +94,13 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) * targeted by the flip */ if (amdgpu_crtc->enabled && - (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, + (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0, &vpos, &hpos, NULL, NULL, &crtc->hwmode) & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && (int)(work->target_vblank - - amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) { + amdgpu_get_vblank_counter_kms(crtc)) > 0) { schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); return; } @@ -132,10 +133,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work) /* unpin of the old buffer */ r = amdgpu_bo_reserve(work->old_abo, true); if (likely(r == 0)) { - r = amdgpu_bo_unpin(work->old_abo); - if (unlikely(r != 0)) { - DRM_ERROR("failed to unpin buffer after flip\n"); - } + amdgpu_bo_unpin(work->old_abo); amdgpu_bo_unreserve(work->old_abo); } else DRM_ERROR("failed to reserve buffer after flip\n"); @@ -152,7 +150,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_gem_object *obj; struct amdgpu_flip_work *work; @@ -205,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, - &work->shared_count, - &work->shared); + r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); goto unpin; @@ -219,7 +216,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, if (!adev->enable_virtual_display) work->base = amdgpu_bo_gpu_offset(new_abo); work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + - amdgpu_get_vblank_counter_kms(dev, work->crtc_id); + amdgpu_get_vblank_counter_kms(crtc); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -249,8 +246,7 @@ pflip_cleanup: } unpin: if (!adev->enable_virtual_display) - if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) - 
DRM_ERROR("failed to unpin new abo in error path\n"); + amdgpu_bo_unpin(new_abo); unreserve: amdgpu_bo_unreserve(new_abo); @@ -282,7 +278,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_crtc_helper_set_config(set, ctx); @@ -292,7 +288,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, pm_runtime_mark_last_busy(dev->dev); - adev = dev->dev_private; + adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, take the current one */ if (active && !adev->have_disp_power_ref) { @@ -306,6 +302,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev->have_disp_power_ref = false; } +out: /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; @@ -443,10 +440,6 @@ void amdgpu_display_print_display_setup(struct drm_device *dev) drm_connector_list_iter_end(&iter); } -/** - * amdgpu_display_ddc_probe - * - */ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux) { @@ -511,33 +504,674 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, * to avoid hang caused by placement of scanout BO in GTT on certain * APUs. So force the BO placement to VRAM in case this architecture * will not allow USWC mappings. - * Also, don't allow GTT domain if the BO doens't have USWC falg set. + * Also, don't allow GTT domain if the BO doesn't have USWC flag set. */ - if (adev->asic_type >= CHIP_CARRIZO && - adev->asic_type < CHIP_RAVEN && - (adev->flags & AMD_IS_APU) && - (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && + if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && amdgpu_bo_support_uswc(bo_flags) && - amdgpu_device_asic_has_dc_support(adev->asic_type)) - domain |= AMDGPU_GEM_DOMAIN_GTT; + amdgpu_device_asic_has_dc_support(adev->asic_type)) { + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + case CHIP_RAVEN: + /* enable S/G on PCO and RV2 */ + if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || + (adev->apu_flags & AMD_APU_IS_PICASSO)) + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + case CHIP_RENOIR: + case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + + default: + break; + } + } #endif return domain; } -int amdgpu_display_framebuffer_init(struct drm_device *dev, - struct amdgpu_framebuffer *rfb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj) +static const struct drm_format_info dcc_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, 
.hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2, + .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2, + .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, +}; + +static const struct drm_format_info dcc_retile_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3, + .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, + .has_alpha = true, }, + { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3, + .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, }, +}; + +static const struct drm_format_info * +lookup_format_info(const struct drm_format_info formats[], + int num_formats, u32 format) +{ + int i; + + for (i = 0; i < num_formats; i++) { + if (formats[i].format == format) + return &formats[i]; + } + + return NULL; +} + +const struct drm_format_info * +amdgpu_lookup_format_info(u32 format, uint64_t modifier) +{ + if (!IS_AMD_FMT_MOD(modifier)) + return NULL; + + if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) + return lookup_format_info(dcc_retile_formats, + ARRAY_SIZE(dcc_retile_formats), + format); + + if (AMD_FMT_MOD_GET(DCC, modifier)) + return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats), + format); + + /* returning NULL will cause the default format structs to be used. */ + return NULL; +} + + +/* + * Tries to extract the renderable DCC offset from the opaque metadata attached + * to the buffer. + */ +static int +extract_render_dcc_offset(struct amdgpu_device *adev, + struct drm_gem_object *obj, + uint64_t *offset) { + struct amdgpu_bo *rbo; + int r = 0; + uint32_t metadata[10]; /* Something that fits a descriptor + header. 
*/ + uint32_t size; + + rbo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(rbo, false); + + if (unlikely(r)) { + /* Don't show error message when returning -ERESTARTSYS */ + if (r != -ERESTARTSYS) + DRM_ERROR("Unable to reserve buffer: %d\n", r); + return r; + } + + r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL); + amdgpu_bo_unreserve(rbo); + + if (r) + return r; + + /* + * The first word is the metadata version, and we need space for at least + * the version + pci vendor+device id + 8 words for a descriptor. + */ + if (size < 40 || metadata[0] != 1) + return -EINVAL; + + if (adev->family >= AMDGPU_FAMILY_NV) { + /* resource word 6/7 META_DATA_ADDRESS{_LO} */ + *offset = ((u64)metadata[9] << 16u) | + ((metadata[8] & 0xFF000000u) >> 16); + } else { + /* resource word 5/7 META_DATA_ADDRESS */ + *offset = ((u64)metadata[9] << 8u) | + ((u64)(metadata[7] & 0x1FE0000u) << 23); + } + + return 0; +} + +static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) +{ + struct amdgpu_device *adev = drm_to_adev(afb->base.dev); + uint64_t modifier = 0; + + if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) { + modifier = DRM_FORMAT_MOD_LINEAR; + } else { + int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE); + bool has_xor = swizzle >= 16; + int block_size_bits; + int version; + int pipe_xor_bits = 0; + int bank_xor_bits = 0; + int packers = 0; + int rb = 0; + int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B); + + switch (swizzle >> 2) { + case 0: /* 256B */ + block_size_bits = 8; + break; + case 1: /* 4KiB */ + case 5: /* 4KiB _X */ + block_size_bits = 12; + break; + case 2: /* 64KiB */ + case 4: /* 64 KiB _T */ + case 6: /* 64 KiB _X */ + block_size_bits = 16; + break; + default: + /* RESERVED or VAR */ + return -EINVAL; + } + + if (adev->asic_type >= CHIP_SIENNA_CICHLID) + version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS; + else if (adev->family == AMDGPU_FAMILY_NV) + version = AMD_FMT_MOD_TILE_VER_GFX10; + else + version = AMD_FMT_MOD_TILE_VER_GFX9; + + switch (swizzle & 3) { + case 0: /* Z microtiling */ + return -EINVAL; + case 1: /* S microtiling */ + if (!has_xor) + version = AMD_FMT_MOD_TILE_VER_GFX9; + break; + case 2: + if (!has_xor && afb->base.format->cpp[0] != 4) + version = AMD_FMT_MOD_TILE_VER_GFX9; + break; + case 3: + break; + } + + if (has_xor) { + switch (version) { + case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: + pipe_xor_bits = min(block_size_bits - 8, pipes); + packers = min(block_size_bits - 8 - pipe_xor_bits, + ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs)); + break; + case AMD_FMT_MOD_TILE_VER_GFX10: + pipe_xor_bits = min(block_size_bits - 8, pipes); + break; + case AMD_FMT_MOD_TILE_VER_GFX9: + rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + + ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); + pipe_xor_bits = min(block_size_bits - 8, pipes + + ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); + bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits, + ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); + break; + } + } + + modifier = AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) | + AMD_FMT_MOD_SET(TILE_VERSION, version) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, packers); + + if (dcc_offset != 0) { + bool dcc_i64b = 
AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0; + bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS; + const struct drm_format_info *format_info; + u64 render_dcc_offset; + + /* Enable constant encode on RAVEN2 and later. */ + bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN || + (adev->asic_type == CHIP_RAVEN && + adev->external_rev_id >= 0x81); + + int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B : + dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B : + AMD_FMT_MOD_DCC_BLOCK_256B; + + modifier |= AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size); + + afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0]; + afb->base.pitches[1] = + AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1; + + /* + * If the userspace driver uses retiling the tiling flags do not contain + * info on the renderable DCC buffer. Luckily the opaque metadata contains + * the info so we can try to extract it. The kernel does not use this info + * but we should convert it to a modifier plane for getfb2, so the + * userspace driver that gets it doesn't have to juggle around another DCC + * plane internally. + */ + if (extract_render_dcc_offset(adev, afb->base.obj[0], + &render_dcc_offset) == 0 && + render_dcc_offset != 0 && + render_dcc_offset != afb->base.offsets[1] && + render_dcc_offset < UINT_MAX) { + uint32_t dcc_block_bits; /* of base surface data */ + + modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1); + afb->base.offsets[2] = render_dcc_offset; + + if (adev->family >= AMDGPU_FAMILY_NV) { + int extra_pipe = 0; + + if (adev->asic_type >= CHIP_SIENNA_CICHLID && + pipes == packers && pipes > 1) + extra_pipe = 1; + + dcc_block_bits = max(20, 16 + pipes + extra_pipe); + } else { + modifier |= AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes); + dcc_block_bits = max(20, 18 + rb); + } + + dcc_block_bits -= ilog2(afb->base.format->cpp[0]); + afb->base.pitches[2] = ALIGN(afb->base.width, + 1u << ((dcc_block_bits + 1) / 2)); + } + format_info = amdgpu_lookup_format_info(afb->base.format->format, + modifier); + if (!format_info) + return -EINVAL; + + afb->base.format = format_info; + } + } + + afb->base.modifier = modifier; + afb->base.flags |= DRM_MODE_FB_MODIFIERS; + return 0; +} + +/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */ +static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb) +{ + u64 micro_tile_mode; + + /* Zero swizzle mode means linear */ + if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0) + return 0; + + micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE); + switch (micro_tile_mode) { + case 0: /* DISPLAY */ + case 3: /* RENDER */ + return 0; + default: + drm_dbg_kms(afb->base.dev, + "Micro tile mode %llu not supported for scanout\n", + micro_tile_mode); + return -EINVAL; + } +} + +static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, + unsigned int *width, unsigned int *height) +{ + unsigned int cpp_log2 = ilog2(cpp); + unsigned int pixel_log2 = block_log2 - cpp_log2; + unsigned int width_log2 = (pixel_log2 + 1) / 2; + unsigned int height_log2 = pixel_log2 - width_log2; + + *width = 1 << width_log2; + *height = 1 << height_log2; +} + +static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned, + bool pipe_aligned) +{ + unsigned int ver = 
AMD_FMT_MOD_GET(TILE_VERSION, modifier); + + switch (ver) { + case AMD_FMT_MOD_TILE_VER_GFX9: { + /* + * TODO: for pipe aligned we may need to check the alignment of the + * total size of the surface, which may need to be bigger than the + * natural alignment due to some HW workarounds + */ + return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12); + } + case AMD_FMT_MOD_TILE_VER_GFX10: + case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: { + int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); + + if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 && + AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2) + ++pipes_log2; + + return max(8 + (pipe_aligned ? pipes_log2 : 0), 12); + } + default: + return 0; + } +} + +static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane, + const struct drm_format_info *format, + unsigned int block_width, unsigned int block_height, + unsigned int block_size_log2) +{ + unsigned int width = rfb->base.width / + ((plane && plane < format->num_planes) ? format->hsub : 1); + unsigned int height = rfb->base.height / + ((plane && plane < format->num_planes) ? format->vsub : 1); + unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1; + unsigned int block_pitch = block_width * cpp; + unsigned int min_pitch = ALIGN(width * cpp, block_pitch); + unsigned int block_size = 1 << block_size_log2; + uint64_t size; + + if (rfb->base.pitches[plane] % block_pitch) { + drm_dbg_kms(rfb->base.dev, + "pitch %d for plane %d is not a multiple of block pitch %d\n", + rfb->base.pitches[plane], plane, block_pitch); + return -EINVAL; + } + if (rfb->base.pitches[plane] < min_pitch) { + drm_dbg_kms(rfb->base.dev, + "pitch %d for plane %d is less than minimum pitch %d\n", + rfb->base.pitches[plane], plane, min_pitch); + return -EINVAL; + } + + /* Force at least natural alignment. 
*/ + if (rfb->base.offsets[plane] % block_size) { + drm_dbg_kms(rfb->base.dev, + "offset 0x%x for plane %d is not a multiple of block size 0x%x\n", + rfb->base.offsets[plane], plane, block_size); + return -EINVAL; + } + + size = rfb->base.offsets[plane] + + (uint64_t)rfb->base.pitches[plane] / block_pitch * + block_size * DIV_ROUND_UP(height, block_height); + + if (rfb->base.obj[0]->size < size) { + drm_dbg_kms(rfb->base.dev, + "BO size 0x%zx is less than 0x%llx required for plane %d\n", + rfb->base.obj[0]->size, size, plane); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) +{ + const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format); + uint64_t modifier = rfb->base.modifier; int ret; + unsigned int i, block_width, block_height, block_size_log2; + + if (!rfb->base.dev->mode_config.allow_fb_modifiers) + return 0; + + for (i = 0; i < format_info->num_planes; ++i) { + if (modifier == DRM_FORMAT_MOD_LINEAR) { + block_width = 256 / format_info->cpp[i]; + block_height = 1; + block_size_log2 = 8; + } else { + int swizzle = AMD_FMT_MOD_GET(TILE, modifier); + + switch ((swizzle & ~3) + 1) { + case DC_SW_256B_S: + block_size_log2 = 8; + break; + case DC_SW_4KB_S: + case DC_SW_4KB_S_X: + block_size_log2 = 12; + break; + case DC_SW_64KB_S: + case DC_SW_64KB_S_T: + case DC_SW_64KB_S_X: + block_size_log2 = 16; + break; + default: + drm_dbg_kms(rfb->base.dev, + "Swizzle mode with unknown block size: %d\n", swizzle); + return -EINVAL; + } + + get_block_dimensions(block_size_log2, format_info->cpp[i], + &block_width, &block_height); + } + + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, block_size_log2); + if (ret) + return ret; + } + + if (AMD_FMT_MOD_GET(DCC, modifier)) { + if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) { + block_size_log2 = get_dcc_block_size(modifier, false, false); + get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], + &block_width, &block_height); + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, + block_size_log2); + if (ret) + return ret; + + ++i; + block_size_log2 = get_dcc_block_size(modifier, true, true); + } else { + bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier); + + block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned); + } + get_block_dimensions(block_size_log2 + 8, format_info->cpp[0], + &block_width, &block_height); + ret = amdgpu_display_verify_plane(rfb, i, format_info, + block_width, block_height, block_size_log2); + if (ret) + return ret; + } + + return 0; +} + +static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, + uint64_t *tiling_flags, bool *tmz_surface) +{ + struct amdgpu_bo *rbo; + int r; + + if (!amdgpu_fb) { + *tiling_flags = 0; + *tmz_surface = false; + return 0; + } + + rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]); + r = amdgpu_bo_reserve(rbo, false); + + if (unlikely(r)) { + /* Don't show error message when returning -ERESTARTSYS */ + if (r != -ERESTARTSYS) + DRM_ERROR("Unable to reserve buffer: %d\n", r); + return r; + } + + if (tiling_flags) + amdgpu_bo_get_tiling_flags(rbo, tiling_flags); + + if (tmz_surface) + *tmz_surface = amdgpu_bo_encrypted(rbo); + + amdgpu_bo_unreserve(rbo); + + return r; +} + +int amdgpu_display_gem_fb_init(struct drm_device *dev, + struct amdgpu_framebuffer *rfb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + rfb->base.obj[0] = obj; 
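/*
 * A worked example for the linear path of amdgpu_display_verify_plane()
 * above (illustrative numbers, assuming DRM_FORMAT_XRGB8888 with cpp = 4):
 * block_width = 256 / 4 = 64 pixels, so block_pitch = 64 * 4 = 256 bytes
 * and block_size = 1 << 8 = 256 bytes. A 1920x1080 framebuffer then needs
 * pitches[0] >= ALIGN(1920 * 4, 256) = 7680 and, with block_height == 1,
 * a BO of at least 7680 / 256 * 256 * 1080 = 8294400 bytes.
 */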
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + + ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj); + if (ret) + goto err; + ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); - if (ret) { - rfb->base.obj[0] = NULL; + if (ret) + goto err; + + return 0; +err: + drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret); + rfb->base.obj[0] = NULL; + return ret; +} + +int amdgpu_display_gem_fb_verify_and_init( + struct drm_device *dev, struct amdgpu_framebuffer *rfb, + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + rfb->base.obj[0] = obj; + drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + /* Verify that the modifier is supported. */ + if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, + mode_cmd->modifier[0])) { + drm_dbg_kms(dev, + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); + + ret = -EINVAL; + goto err; + } + + ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj); + if (ret) + goto err; + + ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); + if (ret) + goto err; + + return 0; +err: + drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret); + rfb->base.obj[0] = NULL; + return ret; +} + +int amdgpu_display_framebuffer_init(struct drm_device *dev, + struct amdgpu_framebuffer *rfb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int ret, i; + + /* + * This needs to happen before modifier conversion as that might change + * the number of planes. + */ + for (i = 1; i < rfb->base.format->num_planes; ++i) { + if (mode_cmd->handles[i] != mode_cmd->handles[0]) { + drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. 
%u\n", + i, mode_cmd->handles[0], mode_cmd->handles[i]); + ret = -EINVAL; + return ret; + } + } + + ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface); + if (ret) + return ret; + + if (!dev->mode_config.allow_fb_modifiers) { + drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, + "GFX9+ requires FB check based on format modifier\n"); + ret = check_tiling_flags_gfx6(rfb); + if (ret) + return ret; + } + + if (dev->mode_config.allow_fb_modifiers && + !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { + ret = convert_tiling_flags_to_modifier(rfb); + if (ret) { + drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier", + rfb->tiling_flags); + return ret; + } + } + + ret = amdgpu_display_verify_sizes(rfb); + if (ret) return ret; + + for (i = 0; i < rfb->base.format->num_planes; ++i) { + drm_gem_object_get(rfb->base.obj[0]); + rfb->base.obj[i] = rfb->base.obj[0]; } + return 0; } @@ -546,36 +1180,43 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + uint32_t domains; int ret; obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { - dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " - "can't create framebuffer\n", mode_cmd->handles[0]); + drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, " + "can't create framebuffer\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ - if (obj->import_attach) { - DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + bo = gem_to_amdgpu_bo(obj); + domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); + if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { + drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); + drm_gem_object_put(obj); return ERR_PTR(-EINVAL); } amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ERR_PTR(-ENOMEM); } - ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); + ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, + mode_cmd, obj); if (ret) { kfree(amdgpu_fb); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ERR_PTR(ret); } + drm_gem_object_put(obj); return &amdgpu_fb->base; } @@ -607,51 +1248,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) int sz; adev->mode_info.coherent_mode_property = - drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1); + drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1); if (!adev->mode_info.coherent_mode_property) return -ENOMEM; adev->mode_info.load_detect_property = - drm_property_create_range(adev->ddev, 0, "load detection", 0, 1); + drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1); if (!adev->mode_info.load_detect_property) return -ENOMEM; - drm_mode_create_scaling_mode_property(adev->ddev); + drm_mode_create_scaling_mode_property(adev_to_drm(adev)); sz = ARRAY_SIZE(amdgpu_underscan_enum_list); adev->mode_info.underscan_property = - drm_property_create_enum(adev->ddev, 0, - "underscan", - amdgpu_underscan_enum_list, sz); + drm_property_create_enum(adev_to_drm(adev), 0, + "underscan", + amdgpu_underscan_enum_list, sz); 
adev->mode_info.underscan_hborder_property = - drm_property_create_range(adev->ddev, 0, - "underscan hborder", 0, 128); + drm_property_create_range(adev_to_drm(adev), 0, + "underscan hborder", 0, 128); if (!adev->mode_info.underscan_hborder_property) return -ENOMEM; adev->mode_info.underscan_vborder_property = - drm_property_create_range(adev->ddev, 0, - "underscan vborder", 0, 128); + drm_property_create_range(adev_to_drm(adev), 0, + "underscan vborder", 0, 128); if (!adev->mode_info.underscan_vborder_property) return -ENOMEM; sz = ARRAY_SIZE(amdgpu_audio_enum_list); adev->mode_info.audio_property = - drm_property_create_enum(adev->ddev, 0, + drm_property_create_enum(adev_to_drm(adev), 0, "audio", amdgpu_audio_enum_list, sz); sz = ARRAY_SIZE(amdgpu_dither_enum_list); adev->mode_info.dither_property = - drm_property_create_enum(adev->ddev, 0, + drm_property_create_enum(adev_to_drm(adev), 0, "dither", amdgpu_dither_enum_list, sz); if (amdgpu_device_has_dc_support(adev)) { adev->mode_info.abm_level_property = - drm_property_create_range(adev->ddev, 0, - "abm level", 0, 4); + drm_property_create_range(adev_to_drm(adev), 0, + "abm level", 0, 4); if (!adev->mode_info.abm_level_property) return -ENOMEM; } @@ -690,7 +1331,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_encoder *amdgpu_encoder; struct drm_connector *connector; - struct amdgpu_connector *amdgpu_connector; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; @@ -702,7 +1342,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, continue; amdgpu_encoder = to_amdgpu_encoder(encoder); connector = amdgpu_get_connector_for_encoder(encoder); - amdgpu_connector = to_amdgpu_connector(connector); /* set scaling */ if (amdgpu_encoder->rmx_type == RMX_OFF) @@ -803,7 +1442,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, int vbl_start, vbl_end, vtotal, ret = 0; bool in_vbl = true; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. 
*/ @@ -916,3 +1555,104 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc) return AMDGPU_CRTC_IRQ_NONE; } } + +bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, + bool in_vblank_irq, int *vpos, + int *hpos, ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + + return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, + stime, etime, mode); +} + +int amdgpu_display_suspend_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + int r; + + /* turn off display hw */ + drm_modeset_lock_all(dev); + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); + /* unpin the front buffers and cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_framebuffer *fb = crtc->primary->fb; + struct amdgpu_bo *robj; + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + amdgpu_bo_unpin(aobj); + amdgpu_bo_unreserve(aobj); + } + } + + if (fb == NULL || fb->obj[0] == NULL) { + continue; + } + robj = gem_to_amdgpu_bo(fb->obj[0]); + /* don't unpin kernel fb objects */ + if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); + } + } + } + return 0; +} + +int amdgpu_display_resume_helper(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_crtc *crtc; + int r; + + /* pin cursors */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + r = amdgpu_bo_reserve(aobj, true); + if (r == 0) { + r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); + if (r != 0) + dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); + amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); + amdgpu_bo_unreserve(aobj); + } + } + } + + drm_helper_resume_force_mode(dev); + + /* turn on display hw */ + drm_modeset_lock_all(dev); + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock_all(dev); + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 3620b24785e1..7b6d83e2b13c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -44,5 +44,10 @@ struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); +const struct drm_format_info * +amdgpu_lookup_format_info(u32 format, uint64_t modifier); + +int amdgpu_display_suspend_helper(struct amdgpu_device *adev); +int amdgpu_display_resume_helper(struct 
amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e2eec7b66334..ae6ab93c868b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -35,191 +35,106 @@ #include "amdgpu_display.h" #include "amdgpu_gem.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_xgmi.h" #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> #include <linux/dma-fence-array.h> +#include <linux/pci-p2pdma.h> +#include <linux/pm_runtime.h> /** - * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation - * @obj: GEM BO + * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * - * Sets up an in-kernel virtual mapping of the BO's memory. + * @dmabuf: DMA-buf where we attach to + * @attach: attachment to add * - * Returns: - * The virtual address of the mapping or an error pointer. + * Add the attachment as user to the exported DMA-buf. */ -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) +static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) { + struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int ret; + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, - &bo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); + if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0) + attach->peer2peer = false; - return bo->dma_buf_vmap.virtual; -} + if (attach->dev->driver == adev->dev->driver) + return 0; -/** - * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation - * @obj: GEM BO - * @vaddr: Virtual address (unused) - * - * Tears down the in-kernel virtual mapping of the BO's memory. - */ -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) + goto out; + + return 0; - ttm_bo_kunmap(&bo->dma_buf_vmap); +out: + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; } /** - * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation - * @obj: GEM BO - * @vma: Virtual memory area + * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation * - * Sets up a userspace mapping of the BO's memory in the given - * virtual memory area. + * @dmabuf: DMA-buf where we remove the attachment from + * @attach: the attachment to remove * - * Returns: - * 0 on success or a negative error code on failure. + * Called when an attachment is removed from the DMA-buf. */ -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) { + struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - unsigned asize = amdgpu_bo_size(bo); - int ret; - - if (!vma->vm_file) - return -ENODEV; - if (adev == NULL) - return -ENODEV; - - /* Check for valid size. 
*/ - if (asize < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || - (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - return -EPERM; - } - vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT; - - /* prime mmap does not need to check access, so allow here */ - ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data); - if (ret) - return ret; - - ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev); - drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data); - - return ret; -} - -static int -__dma_resv_make_exclusive(struct dma_resv *obj) -{ - struct dma_fence **fences; - unsigned int count; - int r; - - if (!dma_resv_get_list(obj)) /* no shared fences to convert */ - return 0; - - r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); - if (r) - return r; - - if (count == 0) { - /* Now that was unexpected. */ - } else if (count == 1) { - dma_resv_add_excl_fence(obj, fences[0]); - dma_fence_put(fences[0]); - kfree(fences); - } else { - struct dma_fence_array *array; - - array = dma_fence_array_create(count, fences, - dma_fence_context_alloc(1), 0, - false); - if (!array) - goto err_fences_put; - - dma_resv_add_excl_fence(obj, &array->base); - dma_fence_put(&array->base); - } - - return 0; - -err_fences_put: - while (count--) - dma_fence_put(fences[count]); - kfree(fences); - return -ENOMEM; + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } /** - * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation + * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation * - * @dmabuf: DMA-buf where we attach to - * @attach: attachment to add + * @attach: attachment to pin down * - * Add the attachment as user to the exported DMA-buf. + * Pin the BO which is backing the DMA-buf so that it can't move any more. */ -static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) +static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) { - struct drm_gem_object *obj = dmabuf->priv; + struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r; - if (attach->dev->driver == adev->dev->driver) - return 0; - - r = amdgpu_bo_reserve(bo, false); - if (unlikely(r != 0)) - return r; - - /* - * We only create shared fences for internal use, but importers - * of the dmabuf rely on exclusive fences for implicitly - * tracking write hazards. As any of the current fences may - * correspond to a write, we need to convert all existing - * fences on the reservation object into a single exclusive - * fence. - */ - r = __dma_resv_make_exclusive(bo->tbo.base.resv); + /* pin buffer into GTT */ + r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); if (r) return r; - bo->prime_shared_count++; - amdgpu_bo_unreserve(bo); + if (bo->tbo.moving) { + r = dma_fence_wait(bo->tbo.moving, true); + if (r) { + amdgpu_bo_unpin(bo); + return r; + } + } return 0; } /** - * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation + * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation * - * @dmabuf: DMA-buf where we remove the attachment from - * @attach: the attachment to remove + * @attach: attachment to unpin * - * Called when an attachment is removed from the DMA-buf. + * Unpin a previously pinned BO to make it movable again. 
*/ -static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attach) +static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach) { - struct drm_gem_object *obj = dmabuf->priv; + struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) - bo->prime_shared_count--; + amdgpu_bo_unpin(bo); } /** @@ -241,27 +156,60 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, struct dma_buf *dma_buf = attach->dmabuf; struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct sg_table *sgt; long r; - r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); - if (r) - return ERR_PTR(r); + if (!bo->tbo.pin_count) { + /* move buffer into GTT or VRAM */ + struct ttm_operation_ctx ctx = { false, false }; + unsigned domains = AMDGPU_GEM_DOMAIN_GTT; - sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); - if (IS_ERR(sgt)) - return sgt; + if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM && + attach->peer2peer) { + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + domains |= AMDGPU_GEM_DOMAIN_VRAM; + } + amdgpu_bo_placement_from_domain(bo, domains); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + return ERR_PTR(r); + + } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) & + AMDGPU_GEM_DOMAIN_GTT)) { + return ERR_PTR(-EBUSY); + } - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) - goto error_free; + switch (bo->tbo.resource->mem_type) { + case TTM_PL_TT: + sgt = drm_prime_pages_to_sg(obj->dev, + bo->tbo.ttm->pages, + bo->tbo.ttm->num_pages); + if (IS_ERR(sgt)) + return sgt; + + if (dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto error_free; + break; + + case TTM_PL_VRAM: + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, + bo->tbo.base.size, attach->dev, + dir, &sgt); + if (r) + return ERR_PTR(r); + break; + default: + return ERR_PTR(-EINVAL); + } return sgt; error_free: sg_free_table(sgt); kfree(sgt); - return ERR_PTR(-ENOMEM); + return ERR_PTR(-EBUSY); } /** @@ -277,13 +225,13 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - struct drm_gem_object *obj = attach->dmabuf->priv; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); - sg_free_table(sgt); - kfree(sgt); - amdgpu_bo_unpin(bo); + if (sgt->sgl->page_link) { + dma_unmap_sgtable(attach->dev, sgt, dir, 0); + sg_free_table(sgt); + kfree(sgt); + } else { + amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt); + } } /** @@ -317,7 +265,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, if (unlikely(ret != 0)) return ret; - if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) { + if (!bo->tbo.pin_count && + (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) { amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); } @@ -327,9 +276,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, } const struct dma_buf_ops amdgpu_dmabuf_ops = { - .dynamic_mapping = true, .attach = amdgpu_dma_buf_attach, .detach = amdgpu_dma_buf_detach, + .pin = amdgpu_dma_buf_pin, + .unpin = 
amdgpu_dma_buf_unpin, .map_dma_buf = amdgpu_dma_buf_map, .unmap_dma_buf = amdgpu_dma_buf_unmap, .release = drm_gem_dmabuf_release, @@ -360,10 +310,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, return ERR_PTR(-EPERM); buf = drm_gem_prime_export(gobj, flags); - if (!IS_ERR(buf)) { - buf->file->f_mapping = gobj->dev->anon_inode->i_mapping; + if (!IS_ERR(buf)) buf->ops = &amdgpu_dmabuf_ops; - } return buf; } @@ -384,30 +332,32 @@ static struct drm_gem_object * amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) { struct dma_resv *resv = dma_buf->resv; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); + struct drm_gem_object *gobj; struct amdgpu_bo *bo; - struct amdgpu_bo_param bp; + uint64_t flags = 0; int ret; - memset(&bp, 0, sizeof(bp)); - bp.size = dma_buf->size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_CPU; - bp.flags = 0; - bp.type = ttm_bo_type_sg; - bp.resv = resv; dma_resv_lock(resv, NULL); - ret = amdgpu_bo_create(adev, &bp, &bo); + + if (dma_buf->ops == &amdgpu_dmabuf_ops) { + struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv); + + flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC; + } + + ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_CPU, flags, + ttm_bo_type_sg, resv, &gobj); if (ret) goto error; + bo = gem_to_amdgpu_bo(gobj); bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - if (dma_buf->ops != &amdgpu_dmabuf_ops) - bo->prime_shared_count = 1; dma_resv_unlock(resv); - return &bo->tbo.base; + return gobj; error: dma_resv_unlock(resv); @@ -415,6 +365,74 @@ error: } /** + * amdgpu_dma_buf_move_notify - &attach.move_notify implementation + * + * @attach: the DMA-buf attachment + * + * Invalidate the DMA-buf attachment, making sure that we re-create the + * mapping before the next use. + */ +static void +amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->importer_priv; + struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv); + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_operation_ctx ctx = { false, false }; + struct ttm_placement placement = {}; + struct amdgpu_vm_bo_base *bo_base; + int r; + + if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM) + return; + + r = ttm_bo_validate(&bo->tbo, &placement, &ctx); + if (r) { + DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r); + return; + } + + for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { + struct amdgpu_vm *vm = bo_base->vm; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; + + if (ticket) { + /* When we get an error here it means that somebody + * else is holding the VM lock and updating page tables, + * so we can just continue here. + */ + r = dma_resv_lock(resv, ticket); + if (r) + continue; + + } else { + /* TODO: This is more problematic and we actually need + * to allow page table updates without holding the + * lock. 
+ */ + if (!dma_resv_trylock(resv)) + continue; + } + + r = amdgpu_vm_clear_freed(adev, vm, NULL); + if (!r) + r = amdgpu_vm_handle_moved(adev, vm); + + if (r && r != -EBUSY) + DRM_ERROR("Failed to invalidate VM page tables (%d)\n", + r); + + dma_resv_unlock(resv); + } +} + +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = { + .allow_peer2peer = true, + .move_notify = amdgpu_dma_buf_move_notify +}; + +/** * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation * @dev: DRM device * @dma_buf: Shared DMA buffer @@ -446,7 +464,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, if (IS_ERR(obj)) return obj; - attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true); + attach = dma_buf_dynamic_attach(dma_buf, dev->dev, + &amdgpu_dma_buf_attach_ops, obj); if (IS_ERR(attach)) { drm_gem_object_put(obj); return ERR_CAST(attach); @@ -456,3 +475,36 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, obj->import_attach = attach; return obj; } + +/** + * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer + * + * @adev: amdgpu_device pointer of the importer + * @bo: amdgpu buffer object + * + * Returns: + * True if dmabuf accessible over xgmi, false otherwise. + */ +bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, + struct amdgpu_bo *bo) +{ + struct drm_gem_object *obj = &bo->tbo.base; + struct drm_gem_object *gobj; + + if (obj->import_attach) { + struct dma_buf *dma_buf = obj->import_attach->dmabuf; + + if (dma_buf->ops != &amdgpu_dmabuf_ops) + /* No XGMI with non-AMD GPUs */ + return false; + + gobj = dma_buf->priv; + bo = gem_to_amdgpu_bo(gobj); + } + + if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && + (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) + return true; + + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index ec447a7b6b28..3e93b9b407a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -29,10 +29,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, int flags); struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); +bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, + struct amdgpu_bo *bo); extern const struct dma_buf_ops amdgpu_dmabuf_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 3fa18003d4d6..89e6ad30396f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -53,6 +53,7 @@ struct amdgpu_doorbell_index { uint32_t gfx_ring0; uint32_t gfx_ring1; uint32_t sdma_engine[8]; + uint32_t mes_ring; uint32_t ih; union { struct { @@ -177,9 +178,12 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT AMDGPU_NAVI10_DOORBELL_USERQUEUE_END = 0x08A, AMDGPU_NAVI10_DOORBELL_GFX_RING0 = 0x08B, AMDGPU_NAVI10_DOORBELL_GFX_RING1 = 0x08C, + AMDGPU_NAVI10_DOORBELL_MES_RING = 0x090, /* SDMA:256~335*/ AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0 = 0x100, AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1 = 0x10A, + AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2 = 0x114, + AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3 = 0x11E, /* IH: 376~391 */ AMDGPU_NAVI10_DOORBELL_IH = 0x178, 
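/*
 * The ranges quoted in the comments above are decimal while the enum
 * values are hex: sDMA_ENGINE0 at 0x100 == 256 matches "SDMA:256~335",
 * and IH at 0x178 == 376 matches "IH: 376~391".
 */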
/* MMSCH: 392~407 @@ -191,8 +195,13 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT AMDGPU_NAVI10_DOORBELL64_VCN4_5 = 0x18A, AMDGPU_NAVI10_DOORBELL64_VCN6_7 = 0x18B, + AMDGPU_NAVI10_DOORBELL64_VCN8_9 = 0x18C, + AMDGPU_NAVI10_DOORBELL64_VCNa_b = 0x18D, + AMDGPU_NAVI10_DOORBELL64_VCNc_d = 0x18E, + AMDGPU_NAVI10_DOORBELL64_VCNe_f = 0x18F, + AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0, - AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP = AMDGPU_NAVI10_DOORBELL64_VCN6_7, + AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP = AMDGPU_NAVI10_DOORBELL64_VCNe_f, AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = 0x18F, AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c deleted file mode 100644 index 9cc270efee7c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ /dev/null @@ -1,970 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Alex Deucher - */ - -#include "amdgpu.h" -#include "amdgpu_atombios.h" -#include "amdgpu_i2c.h" -#include "amdgpu_dpm.h" -#include "atom.h" -#include "amd_pcie.h" - -void amdgpu_dpm_print_class_info(u32 class, u32 class2) -{ - const char *s; - - switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_NONE: - default: - s = "none"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - s = "battery"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: - s = "balanced"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - s = "performance"; - break; - } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); - if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && - (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); -} - -void amdgpu_dpm_print_cap_info(u32 caps) -{ - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); -} - -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); -} - -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) -{ - struct drm_device *ddev = adev->ddev; - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (amdgpu_crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; - } - } - } -} - - -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev->ddev; - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vblank_in_pixels; - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ - - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vblank_in_pixels = - amdgpu_crtc->hw_mode.crtc_htotal * - (amdgpu_crtc->hw_mode.crtc_vblank_end - - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2)); - - vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; - break; - } - } - } - - return vblank_time_us; -} - -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev->ddev; - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vrefresh = 0; - - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - break; - } - } - } - - return vrefresh; -} - -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) -{ - switch (sensor) { - case THERMAL_TYPE_RV6XX: - case THERMAL_TYPE_RV770: - case THERMAL_TYPE_EVERGREEN: - case THERMAL_TYPE_SUMO: - case THERMAL_TYPE_NI: - case THERMAL_TYPE_SI: - case THERMAL_TYPE_CI: - case THERMAL_TYPE_KV: - return true; - case THERMAL_TYPE_ADT7473_WITH_INTERNAL: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - return false; /* need special handling */ - case THERMAL_TYPE_NONE: - case THERMAL_TYPE_EXTERNAL: - case THERMAL_TYPE_EXTERNAL_GPIO: - default: - return false; - } -} - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union fan_info { - struct _ATOM_PPLIB_FANTABLE fan; - struct _ATOM_PPLIB_FANTABLE2 fan2; - struct _ATOM_PPLIB_FANTABLE3 fan3; -}; - -static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, - ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) -{ - u32 size = atom_table->ucNumEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - int i; - ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; - - amdgpu_table->entries = kzalloc(size, GFP_KERNEL); - if (!amdgpu_table->entries) - return -ENOMEM; - - entry = &atom_table->entries[0]; - for (i = 0; i < atom_table->ucNumEntries; i++) { - amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | - (entry->ucClockHigh << 16); - amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); - } - amdgpu_table->count = atom_table->ucNumEntries; - - return 0; -} - -int amdgpu_get_platform_caps(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - adev->pm.dpm.backbias_response_time = 
le16_to_cpu(power_info->pplib.usBackbiasTime); - adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); - - return 0; -} - -/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 - -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - union power_info *power_info; - union fan_info *fan_info; - ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - int ret, i; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - /* fan table */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - if (power_info->pplib3.usFanTableOffset) { - fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib3.usFanTableOffset)); - adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; - adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); - adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); - adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); - adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); - adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); - adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); - if (fan_info->fan.ucFanTableFormat >= 2) - adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); - else - adev->pm.dpm.fan.t_max = 10900; - adev->pm.dpm.fan.cycle_delay = 100000; - if (fan_info->fan.ucFanTableFormat >= 3) { - adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; - adev->pm.dpm.fan.default_max_fan_pwm = - le16_to_cpu(fan_info->fan3.usFanPWMMax); - adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; - adev->pm.dpm.fan.fan_output_sensitivity = - le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); - } - adev->pm.dpm.fan.ucode_fan_control = true; - } - } - - /* clock dependancy tables, shedding tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { - if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { - dep_table = 
(ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { - ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = - (ATOM_PPLIB_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); - if (clk_v->ucNumEntries) { - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = - le16_to_cpu(clk_v->entries[0].usSclkLow) | - (clk_v->entries[0].ucSclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = - le16_to_cpu(clk_v->entries[0].usMclkLow) | - (clk_v->entries[0].ucMclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = - le16_to_cpu(clk_v->entries[0].usVddc); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = - le16_to_cpu(clk_v->entries[0].usVddci); - } - } - if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { - ATOM_PPLIB_PhaseSheddingLimits_Table *psl = - (ATOM_PPLIB_PhaseSheddingLimits_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); - ATOM_PPLIB_PhaseSheddingLimits_Record *entry; - - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = - kcalloc(psl->ucNumEntries, - sizeof(struct amdgpu_phase_shedding_limits_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - - entry = &psl->entries[0]; - for (i = 0; i < psl->ucNumEntries; i++) { - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = - le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = - le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); - } - adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = - psl->ucNumEntries; - } - } - - /* cac data */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { - adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); - adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); - adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; - adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); - if (adev->pm.dpm.tdp_od_limit) - adev->pm.dpm.power_control = true; - else - adev->pm.dpm.power_control = false; - adev->pm.dpm.tdp_adjustment = 0; - adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); - adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); - 
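One pattern worth calling out in the deleted parser above: every ATOM clock value is stored as a little-endian 16-bit low word plus an 8-bit high byte (usClockLow/ucClockHigh, usSclkLow/ucSclkHigh, and so on) and is reassembled as low | (high << 16). A standalone sketch of that decode follows; atom_clk_entry is a hypothetical stand-in for records like ATOM_PPLIB_Clock_Voltage_Dependency_Record.

#include <linux/types.h>
#include <asm/byteorder.h>

struct atom_clk_entry {
	__le16 clock_low;	/* e.g. usClockLow */
	u8 clock_high;		/* e.g. ucClockHigh */
	__le16 voltage;		/* e.g. usVoltage */
} __packed;

/* Rebuild the 24-bit clock: low word in bits 0-15, high byte in bits 16-23.
 * Example: low = 0x1388 (5000), high = 0x02 -> 0x021388 = 136072.
 */
static u32 atom_decode_clock(const struct atom_clk_entry *e)
{
	return le16_to_cpu(e->clock_low) | ((u32)e->clock_high << 16);
}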
adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); - if (power_info->pplib5.usCACLeakageTableOffset) { - ATOM_PPLIB_CAC_Leakage_Table *cac_table = - (ATOM_PPLIB_CAC_Leakage_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); - ATOM_PPLIB_CAC_Leakage_Record *entry; - u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); - adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - entry = &cac_table->entries[0]; - for (i = 0; i < cac_table->ucNumEntries; i++) { - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = - le16_to_cpu(entry->usVddc1); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = - le16_to_cpu(entry->usVddc2); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = - le16_to_cpu(entry->usVddc3); - } else { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = - le16_to_cpu(entry->usVddc); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = - le32_to_cpu(entry->ulLeakageValue); - } - entry = (ATOM_PPLIB_CAC_Leakage_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); - } - adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; - } - } - - /* ext tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && - ext_hdr->usVCETableOffset) { - VCEClockInfoArray *array = (VCEClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + array->ucNumEntries * sizeof(VCEClockInfo)); - ATOM_PPLIB_VCE_State_Table *states = - (ATOM_PPLIB_VCE_State_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + - 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; - ATOM_PPLIB_VCE_State_Record *state_entry; - VCEClockInfo *vce_clk; - u32 size = limits->numEntries * - sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - state_entry = &states->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - 
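Similarly, in the deleted VCE state loop that continues just below, ucClockInfoIndex packs two fields into one byte: a 6-bit index into VCEClockInfoArray (bits 0-5, masked with 0x3f) and a 2-bit power-state value (bits 6-7, shifted down by 6). A minimal sketch of that unpacking; vce_unpack_clock_info_index is a hypothetical helper, not part of the deleted file.

#include <linux/types.h>

static void vce_unpack_clock_info_index(u8 idx, u8 *clk_idx, u8 *pstate)
{
	*clk_idx = idx & 0x3f;		/* bits 0-5: index into VCEClockInfoArray */
	*pstate = (idx & 0xc0) >> 6;	/* bits 6-7: VCE power state */
}

/* Example: idx = 0x83 (binary 10 000011) -> clk_idx = 3, pstate = 2. */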
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); - } - adev->pm.dpm.num_of_vce_states = - states->numEntries > AMD_MAX_VCE_LEVELS ? - AMD_MAX_VCE_LEVELS : states->numEntries; - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.vce_states[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - adev->pm.dpm.vce_states[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.vce_states[i].clk_idx = - state_entry->ucClockInfoIndex & 0x3f; - adev->pm.dpm.vce_states[i].pstate = - (state_entry->ucClockInfoIndex & 0xc0) >> 6; - state_entry = (ATOM_PPLIB_VCE_State_Record *) - ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && - ext_hdr->usUVDTableOffset) { - UVDClockInfoArray *array = (UVDClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - UVDClockInfo *uvd_clk = (UVDClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = - le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = - le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && - ext_hdr->usSAMUTableOffset) { - ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); - ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return 
-ENOMEM; - } - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && - ext_hdr->usPPMTableOffset) { - ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPPMTableOffset)); - adev->pm.dpm.dyn_state.ppm_table = - kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.ppm_table) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; - adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = - le16_to_cpu(ppm->usCpuCoreNumber); - adev->pm.dpm.dyn_state.ppm_table->platform_tdp = - le32_to_cpu(ppm->ulPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = - le32_to_cpu(ppm->ulSmallACPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->platform_tdc = - le32_to_cpu(ppm->ulPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = - le32_to_cpu(ppm->ulSmallACPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->apu_tdp = - le32_to_cpu(ppm->ulApuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = - le32_to_cpu(ppm->ulDGpuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = - le32_to_cpu(ppm->ulDGpuUlvPower); - adev->pm.dpm.dyn_state.ppm_table->tj_max = - le32_to_cpu(ppm->ulTjmax); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && - ext_hdr->usACPTableOffset) { - ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usACPTableOffset) + 1); - ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && - ext_hdr->usPowerTuneTableOffset) { - u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - ATOM_PowerTune_Table *pt; - adev->pm.dpm.dyn_state.cac_tdp_table = - kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_tdp_table) { - amdgpu_free_extended_power_table(adev); - 
return -ENOMEM; - } - if (rev > 0) { - ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; - pt = &ppt->power_tune_table; - } else { - ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; - pt = &ppt->power_tune_table; - } - adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = - le16_to_cpu(pt->usConfigurableTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); - adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = - le16_to_cpu(pt->usBatteryPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = - le16_to_cpu(pt->usSmallPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = - le16_to_cpu(pt->usLowCACLeakage); - adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = - le16_to_cpu(pt->usHighCACLeakage); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && - ext_hdr->usSclkVddgfxTableOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); - ret = amdgpu_parse_clk_voltage_dep_table( - &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, - dep_table); - if (ret) { - kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); - return ret; - } - } - } - - return 0; -} - -void amdgpu_free_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; - - kfree(dyn_state->vddc_dependency_on_sclk.entries); - kfree(dyn_state->vddci_dependency_on_mclk.entries); - kfree(dyn_state->vddc_dependency_on_mclk.entries); - kfree(dyn_state->mvdd_dependency_on_mclk.entries); - kfree(dyn_state->cac_leakage_table.entries); - kfree(dyn_state->phase_shedding_limits_table.entries); - kfree(dyn_state->ppm_table); - kfree(dyn_state->cac_tdp_table); - kfree(dyn_state->vce_clock_voltage_dependency_table.entries); - kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); - kfree(dyn_state->samu_clock_voltage_dependency_table.entries); - kfree(dyn_state->acp_clock_voltage_dependency_table.entries); - kfree(dyn_state->vddgfx_dependency_on_sclk.entries); -} - -static const char *pp_lib_thermal_controller_names[] = { - "NONE", - "lm63", - "adm1032", - "adm1030", - "max6649", - "lm64", - "f75375", - "RV6xx", - "RV770", - "adt7473", - "NONE", - "External GPIO", - "Evergreen", - "emc2103", - "Sumo", - "Northern Islands", - "Southern Islands", - "lm96163", - "Sea Islands", - "Kaveri/Kabini", -}; - -void amdgpu_add_thermal_controller(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - ATOM_PPLIB_POWERPLAYTABLE *power_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - ATOM_PPLIB_THERMALCONTROLLER *controller; - struct amdgpu_i2c_bus_rec i2c_bus; - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return; - power_table = (ATOM_PPLIB_POWERPLAYTABLE *) - (mode_info->atom_context->bios + data_offset); - controller = 
&power_table->sThermalController; - - /* add the i2c bus for thermal/fan chip */ - if (controller->ucType > 0) { - if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) - adev->pm.no_fan = true; - adev->pm.fan_pulses_per_revolution = - controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; - if (adev->pm.fan_pulses_per_revolution) { - adev->pm.fan_min_rpm = controller->ucFanMinRPM; - adev->pm.fan_max_rpm = controller->ucFanMaxRPM; - } - if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV770; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_NI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_CI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_KV; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; - } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", - pp_lib_thermal_controller_names[controller->ucType], - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; - i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); - adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); - if (adev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = pp_lib_thermal_controller_names[controller->ucType]; - info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&adev->pm.i2c_bus->adapter, &info); - } - } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", - controller->ucType, - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - } - } -} - -enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen) -{ - switch (asic_gen) { - case AMDGPU_PCIE_GEN1: - return AMDGPU_PCIE_GEN1; - case AMDGPU_PCIE_GEN2: - return AMDGPU_PCIE_GEN2; - case AMDGPU_PCIE_GEN3: - return AMDGPU_PCIE_GEN3; - default: - if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && - (default_gen == AMDGPU_PCIE_GEN3)) - return AMDGPU_PCIE_GEN3; - else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && - (default_gen == AMDGPU_PCIE_GEN2)) - return AMDGPU_PCIE_GEN2; - else - return AMDGPU_PCIE_GEN1; - } - return AMDGPU_PCIE_GEN1; -} - -struct amd_vce_state* -amdgpu_get_vce_clock_state(void *handle, u32 idx) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (idx < adev->pm.dpm.num_of_vce_states) - return &adev->pm.dpm.vce_states[idx]; - - return NULL; -} - -int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) -{ - uint32_t clk_freq; - int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, - low ? &clk_freq : NULL, - !low ? &clk_freq : NULL, - true); - if (ret) - return 0; - return clk_freq * 100; - - } else { - return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low)); - } -} - -int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) -{ - uint32_t clk_freq; - int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, - low ? &clk_freq : NULL, - !low ? 
&clk_freq : NULL, - true); - if (ret) - return 0; - return clk_freq * 100; - - } else { - return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low)); - } -} - -int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) -{ - int ret = 0; - bool swsmu = is_support_sw_smu(adev); - - switch (block_type) { - case AMD_IP_BLOCK_TYPE_GFX: - case AMD_IP_BLOCK_TYPE_UVD: - case AMD_IP_BLOCK_TYPE_VCN: - case AMD_IP_BLOCK_TYPE_VCE: - case AMD_IP_BLOCK_TYPE_SDMA: - if (swsmu) - ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); - else - ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( - (adev)->powerplay.pp_handle, block_type, gate)); - break; - case AMD_IP_BLOCK_TYPE_GMC: - case AMD_IP_BLOCK_TYPE_ACP: - ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( - (adev)->powerplay.pp_handle, block_type, gate)); - break; - default: - break; - } - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h deleted file mode 100644 index 2cfb677272af..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ /dev/null @@ -1,520 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef __AMDGPU_DPM_H__ -#define __AMDGPU_DPM_H__ - -enum amdgpu_int_thermal_type { - THERMAL_TYPE_NONE, - THERMAL_TYPE_EXTERNAL, - THERMAL_TYPE_EXTERNAL_GPIO, - THERMAL_TYPE_RV6XX, - THERMAL_TYPE_RV770, - THERMAL_TYPE_ADT7473_WITH_INTERNAL, - THERMAL_TYPE_EVERGREEN, - THERMAL_TYPE_SUMO, - THERMAL_TYPE_NI, - THERMAL_TYPE_SI, - THERMAL_TYPE_EMC2103_WITH_INTERNAL, - THERMAL_TYPE_CI, - THERMAL_TYPE_KV, -}; - -enum amdgpu_dpm_auto_throttle_src { - AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, - AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL -}; - -enum amdgpu_dpm_event_src { - AMDGPU_DPM_EVENT_SRC_ANALOG = 0, - AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, - AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, - AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, - AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 -}; - -struct amdgpu_ps { - u32 caps; /* vbios flags */ - u32 class; /* vbios flags */ - u32 class2; /* vbios flags */ - /* UVD clocks */ - u32 vclk; - u32 dclk; - /* VCE clocks */ - u32 evclk; - u32 ecclk; - bool vce_active; - enum amd_vce_level vce_level; - /* asic priv */ - void *ps_priv; -}; - -struct amdgpu_dpm_thermal { - /* thermal interrupt work */ - struct work_struct work; - /* low temperature threshold */ - int min_temp; - /* high temperature threshold */ - int max_temp; - /* edge max emergency(shutdown) temp */ - int max_edge_emergency_temp; - /* hotspot low temperature threshold */ - int min_hotspot_temp; - /* hotspot high temperature critical threshold */ - int max_hotspot_crit_temp; - /* hotspot max emergency(shutdown) temp */ - int max_hotspot_emergency_temp; - /* memory low temperature threshold */ - int min_mem_temp; - /* memory high temperature critical threshold */ - int max_mem_crit_temp; - /* memory max emergency(shutdown) temp */ - int max_mem_emergency_temp; - /* was last interrupt low to high or high to low */ - bool high_to_low; - /* interrupt source */ - struct amdgpu_irq_src irq; -}; - -enum amdgpu_clk_action -{ - AMDGPU_SCLK_UP = 1, - AMDGPU_SCLK_DOWN -}; - -struct amdgpu_blacklist_clocks -{ - u32 sclk; - u32 mclk; - enum amdgpu_clk_action action; -}; - -struct amdgpu_clock_and_voltage_limits { - u32 sclk; - u32 mclk; - u16 vddc; - u16 vddci; -}; - -struct amdgpu_clock_array { - u32 count; - u32 *values; -}; - -struct amdgpu_clock_voltage_dependency_entry { - u32 clk; - u16 v; -}; - -struct amdgpu_clock_voltage_dependency_table { - u32 count; - struct amdgpu_clock_voltage_dependency_entry *entries; -}; - -union amdgpu_cac_leakage_entry { - struct { - u16 vddc; - u32 leakage; - }; - struct { - u16 vddc1; - u16 vddc2; - u16 vddc3; - }; -}; - -struct amdgpu_cac_leakage_table { - u32 count; - union amdgpu_cac_leakage_entry *entries; -}; - -struct amdgpu_phase_shedding_limits_entry { - u16 voltage; - u32 sclk; - u32 mclk; -}; - -struct amdgpu_phase_shedding_limits_table { - u32 count; - struct amdgpu_phase_shedding_limits_entry *entries; -}; - -struct amdgpu_uvd_clock_voltage_dependency_entry { - u32 vclk; - u32 dclk; - u16 v; -}; - -struct amdgpu_uvd_clock_voltage_dependency_table { - u8 count; - struct amdgpu_uvd_clock_voltage_dependency_entry *entries; -}; - -struct amdgpu_vce_clock_voltage_dependency_entry { - u32 ecclk; - u32 evclk; - u16 v; -}; - -struct amdgpu_vce_clock_voltage_dependency_table { - u8 count; - struct amdgpu_vce_clock_voltage_dependency_entry *entries; -}; - -struct amdgpu_ppm_table { - u8 ppm_design; - u16 cpu_core_number; - u32 platform_tdp; - u32 small_ac_platform_tdp; - u32 platform_tdc; - u32 small_ac_platform_tdc; - u32 apu_tdp; - u32 dgpu_tdp; - u32 dgpu_ulv_power; - u32 tj_max; 
-}; - -struct amdgpu_cac_tdp_table { - u16 tdp; - u16 configurable_tdp; - u16 tdc; - u16 battery_power_limit; - u16 small_power_limit; - u16 low_cac_leakage; - u16 high_cac_leakage; - u16 maximum_power_delivery_limit; -}; - -struct amdgpu_dpm_dynamic_state { - struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; - struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; - struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; - struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; - struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; - struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; - struct amdgpu_clock_array valid_sclk_values; - struct amdgpu_clock_array valid_mclk_values; - struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; - struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; - u32 mclk_sclk_ratio; - u32 sclk_mclk_delta; - u16 vddc_vddci_delta; - u16 min_vddc_for_pcie_gen2; - struct amdgpu_cac_leakage_table cac_leakage_table; - struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; - struct amdgpu_ppm_table *ppm_table; - struct amdgpu_cac_tdp_table *cac_tdp_table; -}; - -struct amdgpu_dpm_fan { - u16 t_min; - u16 t_med; - u16 t_high; - u16 pwm_min; - u16 pwm_med; - u16 pwm_high; - u8 t_hyst; - u32 cycle_delay; - u16 t_max; - u8 control_mode; - u16 default_max_fan_pwm; - u16 default_fan_output_sensitivity; - u16 fan_output_sensitivity; - bool ucode_fan_control; -}; - -enum amdgpu_pcie_gen { - AMDGPU_PCIE_GEN1 = 0, - AMDGPU_PCIE_GEN2 = 1, - AMDGPU_PCIE_GEN3 = 2, - AMDGPU_PCIE_GEN_INVALID = 0xffff -}; - -#define amdgpu_dpm_pre_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_post_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_display_configuration_changed(adev) \ - ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_print_power_state(adev, ps) \ - ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) - -#define amdgpu_dpm_vblank_too_short(adev) \ - ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_enable_bapm(adev, e) \ - ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) - -#define amdgpu_dpm_set_fan_control_mode(adev, m) \ - ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) - -#define amdgpu_dpm_get_fan_control_mode(adev) \ - ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_fan_speed_percent(adev, s) \ - ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s))) - -#define amdgpu_dpm_get_fan_speed_percent(adev, s) \ - ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s))) - -#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \ - 
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s)) - -#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \ - ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s)) - -#define amdgpu_dpm_force_performance_level(adev, l) \ - ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l))) - -#define amdgpu_dpm_get_current_power_state(adev) \ - ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_get_pp_num_states(adev, data) \ - ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) - -#define amdgpu_dpm_get_pp_table(adev, table) \ - ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)) - -#define amdgpu_dpm_set_pp_table(adev, buf, size) \ - ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)) - -#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ - ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)) - -#define amdgpu_dpm_force_clock_level(adev, type, level) \ - ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)) - -#define amdgpu_dpm_get_sclk_od(adev) \ - ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_sclk_od(adev, value) \ - ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)) - -#define amdgpu_dpm_get_mclk_od(adev) \ - ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_mclk_od(adev, value) \ - ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) - -#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \ - ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state)) - -#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ - ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) - -#define amdgpu_dpm_get_vce_clock_state(adev, i) \ - ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i))) - -#define amdgpu_dpm_get_performance_level(adev) \ - ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_reset_power_profile_state(adev, request) \ - ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ - (adev)->powerplay.pp_handle, request)) - -#define amdgpu_dpm_switch_power_profile(adev, type, en) \ - ((adev)->powerplay.pp_funcs->switch_power_profile(\ - (adev)->powerplay.pp_handle, type, en)) - -#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \ - ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\ - (adev)->powerplay.pp_handle, msg_id)) - -#define amdgpu_dpm_get_power_profile_mode(adev, buf) \ - ((adev)->powerplay.pp_funcs->get_power_profile_mode(\ - (adev)->powerplay.pp_handle, buf)) - -#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \ - ((adev)->powerplay.pp_funcs->set_power_profile_mode(\ - (adev)->powerplay.pp_handle, parameter, size)) - -#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \ - ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\ - (adev)->powerplay.pp_handle, type, parameter, size)) - -#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \ - ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\ - (adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_get_ppfeature_status(adev, buf) \ - ((adev)->powerplay.pp_funcs->get_ppfeature_status(\ - 
(adev)->powerplay.pp_handle, (buf))) - -#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \ - ((adev)->powerplay.pp_funcs->set_ppfeature_status(\ - (adev)->powerplay.pp_handle, (ppfeatures))) - -struct amdgpu_dpm { - struct amdgpu_ps *ps; - /* number of valid power states */ - int num_ps; - /* current power state that is active */ - struct amdgpu_ps *current_ps; - /* requested power state */ - struct amdgpu_ps *requested_ps; - /* boot up power state */ - struct amdgpu_ps *boot_ps; - /* default uvd power state */ - struct amdgpu_ps *uvd_ps; - /* vce requirements */ - u32 num_of_vce_states; - struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; - enum amd_vce_level vce_level; - enum amd_pm_state_type state; - enum amd_pm_state_type user_state; - enum amd_pm_state_type last_state; - enum amd_pm_state_type last_user_state; - u32 platform_caps; - u32 voltage_response_time; - u32 backbias_response_time; - void *priv; - u32 new_active_crtcs; - int new_active_crtc_count; - u32 current_active_crtcs; - int current_active_crtc_count; - struct amdgpu_dpm_dynamic_state dyn_state; - struct amdgpu_dpm_fan fan; - u32 tdp_limit; - u32 near_tdp_limit; - u32 near_tdp_limit_adjusted; - u32 sq_ramping_threshold; - u32 cac_leakage; - u16 tdp_od_limit; - u32 tdp_adjustment; - u16 load_line_slope; - bool power_control; - /* special states active */ - bool thermal_active; - bool uvd_active; - bool vce_active; - /* thermal handling */ - struct amdgpu_dpm_thermal thermal; - /* forced levels */ - enum amd_dpm_forced_level forced_level; -}; - -struct amdgpu_pm { - struct mutex mutex; - u32 current_sclk; - u32 current_mclk; - u32 default_sclk; - u32 default_mclk; - struct amdgpu_i2c_chan *i2c_bus; - /* internal thermal controller on rv6xx+ */ - enum amdgpu_int_thermal_type int_thermal_type; - struct device *int_hwmon_dev; - /* fan control parameters */ - bool no_fan; - u8 fan_pulses_per_revolution; - u8 fan_min_rpm; - u8 fan_max_rpm; - /* dpm */ - bool dpm_enabled; - bool sysfs_initialized; - struct amdgpu_dpm dpm; - const struct firmware *fw; /* SMC firmware */ - uint32_t fw_version; - uint32_t pcie_gen_mask; - uint32_t pcie_mlw_mask; - struct amd_pp_display_configuration pm_display_cfg;/* set by dc */ - uint32_t smu_prv_buffer_size; - struct amdgpu_bo *smu_prv_buffer; - bool ac_power; - /* powerplay feature */ - uint32_t pp_feature; - -}; - -#define R600_SSTU_DFLT 0 -#define R600_SST_DFLT 0x00C8 - -/* XXX are these ok? 
*/ -#define R600_TEMP_RANGE_MIN (90 * 1000) -#define R600_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum amdgpu_td { - AMDGPU_TD_AUTO, - AMDGPU_TD_UP, - AMDGPU_TD_DOWN, -}; - -enum amdgpu_display_watermark { - AMDGPU_DISPLAY_WATERMARK_LOW = 0, - AMDGPU_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum amdgpu_display_gap -{ - AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - AMDGPU_PM_DISPLAY_GAP_VBLANK = 1, - AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2, - AMDGPU_PM_DISPLAY_GAP_IGNORE = 3, -}; - -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); -int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, - void *data, uint32_t *size); - -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor); - -int amdgpu_get_platform_caps(struct amdgpu_device *adev); - -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); -void amdgpu_free_extended_power_table(struct amdgpu_device *adev); - -void amdgpu_add_thermal_controller(struct amdgpu_device *adev); - -enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen); - -struct amd_vce_state* -amdgpu_get_vce_clock_state(void *handle, u32 idx); - -int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, - uint32_t block_type, bool gate); - -extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low); - -extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low); - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0ffc9447b573..ad95de6399af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -23,27 +23,33 @@ */ #include <drm/amdgpu_drm.h> +#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_gem.h> #include <drm/drm_vblank.h> +#include <drm/drm_managed.h> #include "amdgpu_drv.h" #include <drm/drm_pciids.h> #include <linux/console.h> #include <linux/module.h> -#include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> #include <drm/drm_probe_helper.h> #include <linux/mmu_notifier.h> +#include <linux/suspend.h> +#include <linux/cc_platform.h> #include "amdgpu.h" #include "amdgpu_irq.h" #include "amdgpu_dma_buf.h" - +#include "amdgpu_sched.h" +#include "amdgpu_fdinfo.h" #include "amdgpu_amdkfd.h" #include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" +#include "amdgpu_reset.h" /* * KMS wrapper. 
@@ -85,21 +91,29 @@ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask * - 3.36.0 - Allow reading more status registers on si/cik + * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness + * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC + * - 3.39.0 - DMABUF implicit sync does a full pipeline sync + * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ + * - 3.41.0 - Add video codec query + * - 3.42.0 - Add 16bpc fixed point display support + * - 3.43.0 - Add device hot plug/unplug support + * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 44 #define KMS_DRIVER_PATCHLEVEL 0 -int amdgpu_vram_limit = 0; -int amdgpu_vis_vram_limit = 0; +int amdgpu_vram_limit; +int amdgpu_vis_vram_limit; int amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ -int amdgpu_benchmarking = 0; -int amdgpu_testing = 0; +int amdgpu_benchmarking; +int amdgpu_testing; int amdgpu_audio = -1; -int amdgpu_disp_priority = 0; -int amdgpu_hw_i2c = 0; +int amdgpu_disp_priority; +int amdgpu_hw_i2c; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; @@ -109,47 +123,77 @@ int amdgpu_aspm = -1; int amdgpu_runtime_pm = -1; uint amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; -int amdgpu_deep_color = 0; +int amdgpu_deep_color; int amdgpu_vm_size = -1; int amdgpu_vm_fragment_size = -1; int amdgpu_vm_block_size = -1; -int amdgpu_vm_fault_stop = 0; -int amdgpu_vm_debug = 0; +int amdgpu_vm_fault_stop; +int amdgpu_vm_debug; int amdgpu_vm_update_mode = -1; -int amdgpu_exp_hw_support = 0; +int amdgpu_exp_hw_support; int amdgpu_dc = -1; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; -uint amdgpu_pcie_gen_cap = 0; -uint amdgpu_pcie_lane_cap = 0; +uint amdgpu_pcie_gen_cap; +uint amdgpu_pcie_lane_cap; uint amdgpu_cg_mask = 0xffffffff; uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -/* OverDrive(bit 14) disabled by default*/ -uint amdgpu_pp_feature_mask = 0xffffbfff; -uint amdgpu_force_long_training = 0; -int amdgpu_job_hang_limit = 0; + +/* + * OverDrive(bit 14) disabled by default + * GFX DCS(bit 19) disabled by default + */ +uint amdgpu_pp_feature_mask = 0xfff7bfff; +uint amdgpu_force_long_training; +int amdgpu_job_hang_limit; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ -int amdgpu_emu_mode = 0; -uint amdgpu_smu_memory_pool_size = 0; -/* FBC (bit 0) disabled by default*/ -uint amdgpu_dc_feature_mask = 0; +int amdgpu_emu_mode; +uint amdgpu_smu_memory_pool_size; +int amdgpu_smu_pptable_id = -1; +/* + * FBC (bit 0) disabled by default + * MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default + * - With this, for multiple monitors in sync (e.g. with the same model), + * mclk switching will be allowed. And the mclk will not be forced to the + * highest. That helps save some idle power.
+ * DISABLE_FRACTIONAL_PWM (bit 2) disabled by default + * PSR (bit 3) disabled by default + * EDP NO POWER SEQUENCING (bit 4) disabled by default + */ +uint amdgpu_dc_feature_mask = 2; +uint amdgpu_dc_debug_mask; int amdgpu_async_gfx_ring = 1; -int amdgpu_mcbp = 0; +int amdgpu_mcbp; int amdgpu_discovery = -1; -int amdgpu_mes = 0; -int amdgpu_noretry = 1; +int amdgpu_mes; +int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; +int amdgpu_tmz = -1; /* auto */ +uint amdgpu_freesync_vid_mode; +int amdgpu_reset_method = -1; /* auto */ +int amdgpu_num_kcq = -1; +int amdgpu_smartshift_bias; + +static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), + .delayed_reset_work = __DELAYED_WORK_INITIALIZER( + mgpu_info.delayed_reset_work, + amdgpu_drv_delayed_reset_work_handler, 0), }; int amdgpu_ras_enable = -1; uint amdgpu_ras_mask = 0xffffffff; +int amdgpu_bad_page_threshold = -1; +struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { + .timeout_fatal_disable = false, + .period = 0x0, /* default to 0x0 (timeout disable) */ +}; /** * DOC: vramlimit (int) @@ -250,9 +294,9 @@ module_param_named(msi, amdgpu_msi, int, 0444); * for SDMA and Video. * * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) - * jobs is 10000. And there is no timeout enforced on compute jobs. + * jobs is 10000. The timeout for compute is 60000. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; " +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " "for passthrough or sriov, 10000 for all jobs." " 0: keep default value. negative: infinity timeout), " "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " @@ -262,7 +306,7 @@ module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_ /** * DOC: dpm (int) * Override for dynamic power management setting - * (0 = disable, 1 = enable, 2 = enable sw smu driver for vega20) + * (0 = disable, 1 = enable) * The default is -1 (auto). */ MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); @@ -287,7 +331,7 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality. */ -MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); +MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); /** @@ -387,12 +431,12 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); /** - * DOC: ppfeaturemask (uint) + * DOC: ppfeaturemask (hexint) * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h. * The default is the current set of stable power features. 
 */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);

/**
 * DOC: forcelongtraining (uint)
@@ -480,7 +524,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
 * DOC: gpu_recovery (int)
 * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
 */
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
 module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);

 /**
@@ -506,6 +550,20 @@ MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff),
 module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);

 /**
+ * DOC: timeout_fatal_disable (bool)
+ * Disable Watchdog timeout fatal error event
+ */
+MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
+module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);
+
+/**
+ * DOC: timeout_period (uint)
+ * Modify the watchdog timeout max_cycles as (1 << period)
+ */
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period))");
+module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
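The timeout_period value above maps to a watchdog budget of max_cycles = 1 << period; a minimal userspace sketch of that relationship (the helper name is illustrative, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* Sketch: the watchdog fires after max_cycles = 1 << period, per the
 * timeout_period documentation above; the documented range caps period
 * at 0x23, and 0 means the timeout is disabled.
 */
static uint64_t watchdog_max_cycles(unsigned int period)
{
	if (period == 0)
		return 0;		/* timeout disabled */
	if (period > 0x23)
		period = 0x23;		/* highest documented setting */
	return 1ULL << period;
}

int main(void)
{
	printf("period=10 -> max_cycles=%llu\n",
	       (unsigned long long)watchdog_max_cycles(10));
	return 0;
}

+
+/**
 * DOC: si_support (int)
 * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
 * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
@@ -572,7 +630,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444);
 /**
 * DOC: discovery (int)
 * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
- * (-1 = auto (default), 0 = disabled, 1 = enabled)
+ * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
 */
MODULE_PARM_DESC(discovery,
	"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
@@ -587,8 +645,14 @@
MODULE_PARM_DESC(mes,
	"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);

+/**
+ * DOC: noretry (int)
+ * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that
+ * do not support per-process XNACK this also disables retry page faults.
+ * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
+ */
 MODULE_PARM_DESC(noretry,
-	"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+	"Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);

 /**
@@ -670,11 +734,14 @@ MODULE_PARM_DESC(debug_largebar,
 * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
 * table to get information about AMD APUs. This option can serve as a workaround on
 * systems with a broken CRAT table.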
+ *
+ * Default is auto (based on the asic type, iommu_v2 support, and the crat
+ * table, to decide whether to use CRAT)
 */
int ignore_crat;
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
-	"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+	"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");

/**
 * DOC: halt_if_hws_hang (int)
@@ -687,13 +754,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau

 /**
 * DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what firmware version
+ * check says. Default value: false (rely on MEC2 firmware version check).
 */
 bool hws_gws_support;
 module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");

 /**
 * DOC: queue_preemption_timeout_ms (int)
@@ -702,6 +768,30 @@ MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supp
 int queue_preemption_timeout_ms = 9000;
 module_param(queue_preemption_timeout_ms, int, 0644);
 MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
+
+/**
+ * DOC: debug_evictions(bool)
+ * Enable extra debug messages to help determine the cause of evictions
+ */
+bool debug_evictions;
+module_param(debug_evictions, bool, 0644);
+MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
+
+/**
+ * DOC: no_system_mem_limit(bool)
+ * Disable the system memory limit, to support multiple-process shared memory
+ */
+bool no_system_mem_limit;
+module_param(no_system_mem_limit, bool, 0644);
+MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+
+/**
+ * DOC: no_queue_eviction_on_vm_fault (int)
+ * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
+ */
+int amdgpu_no_queue_eviction_on_vm_fault = 0;
+MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
+module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
 #endif

 /**
@@ -713,6 +803,13 @@ MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
 module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);

 /**
+ * DOC: dcdebugmask (uint)
+ * Override display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
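Since dcdebugmask is registered with mode 0444, its live value can be read back from sysfs; a small userspace sketch (assuming the module is loaded under its usual name, amdgpu):

#include <stdio.h>

/* Sketch: read back the current dcdebugmask value that the
 * module_param_named() above exposes under /sys/module.
 */
int main(void)
{
	unsigned int mask = 0;
	FILE *f = fopen("/sys/module/amdgpu/parameters/dcdebugmask", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &mask) == 1)
		printf("dcdebugmask = 0x%x\n", mask);
	fclose(f);
	return 0;
}

+
+/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
 * enabled hardware. Requires DMCU to be supported and loaded.
@@ -724,10 +821,708 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
 * Defaults to 0, or disabled. Userspace can still override this level later
 * after boot.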
 */
-uint amdgpu_dm_abm_level = 0;
+uint amdgpu_dm_abm_level;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);

+int amdgpu_backlight = -1;
+MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
+module_param_named(backlight, amdgpu_backlight, bint, 0444);
+
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value: -1 (auto).
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
+/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization that adjusts front-porch timing to achieve a
+ * seamless mode change when setting a FreeSync-supported mode for which a
+ * full modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and the VRR range of the connected display, when users
+ * enable this feature. From the userspace perspective, users see a seamless
+ * mode change when switching between different refresh rates at the same
+ * resolution. Additionally, userspace applications such as video players can
+ * read this mode list and change the refresh rate based on the video frame
+ * rate. Finally, userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+	freesync_video,
+	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
+ * DOC: reset_method (int)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)
+ */
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
+module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+
+/**
+ * DOC: bad_page_threshold (int)
+ * The bad page threshold specifies the maximum number of faulty pages
+ * detected by RAS ECC; the GPU enters a bad status when the total
+ * number of faulty pages exceeds this value.
+ */
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
+module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
+
+MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (reset to 8 if set to a value greater than 8 or less than 0; only affects gfx8 and newer)");
+module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+
+/**
+ * DOC: smu_pptable_id (int)
+ * Used to override the pptable id. id = 0 uses the VBIOS pptable,
+ * id > 0 uses the soft pptable with the specified id.
+ */
+MODULE_PARM_DESC(smu_pptable_id,
+	"specify pptable id to be used (-1 = auto (default), 0 = use pptable from vbios, > 0 = soft pptable id)");
+module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
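The fallback behavior described for num_kcq above can be sketched as a standalone check (illustrative only; the actual validation lives in the device-init code, not here):

#include <stdio.h>

/* Sketch: sanitize num_kcq the way the description above implies:
 * -1 (auto) and out-of-range values both end up at the 8-queue default,
 * while 0..8 is accepted as-is.
 */
static int sanitize_num_kcq(int num_kcq)
{
	if (num_kcq == -1)
		return 8;	/* auto */
	if (num_kcq > 8 || num_kcq < 0)
		return 8;	/* invalid user value, fall back to default */
	return num_kcq;
}

int main(void)
{
	printf("%d %d %d\n", sanitize_num_kcq(-1),
	       sanitize_num_kcq(12), sanitize_num_kcq(4));	/* 8 8 4 */
	return 0;
}

+
+/* These devices are not supported by amdgpu.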
+ * They are supported by the mach64, r128, radeon drivers + */ +static const u16 amdgpu_unsupported_pciidlist[] = { + /* mach64 */ + 0x4354, + 0x4358, + 0x4554, + 0x4742, + 0x4744, + 0x4749, + 0x474C, + 0x474D, + 0x474E, + 0x474F, + 0x4750, + 0x4751, + 0x4752, + 0x4753, + 0x4754, + 0x4755, + 0x4756, + 0x4757, + 0x4758, + 0x4759, + 0x475A, + 0x4C42, + 0x4C44, + 0x4C47, + 0x4C49, + 0x4C4D, + 0x4C4E, + 0x4C50, + 0x4C51, + 0x4C52, + 0x4C53, + 0x5654, + 0x5655, + 0x5656, + /* r128 */ + 0x4c45, + 0x4c46, + 0x4d46, + 0x4d4c, + 0x5041, + 0x5042, + 0x5043, + 0x5044, + 0x5045, + 0x5046, + 0x5047, + 0x5048, + 0x5049, + 0x504A, + 0x504B, + 0x504C, + 0x504D, + 0x504E, + 0x504F, + 0x5050, + 0x5051, + 0x5052, + 0x5053, + 0x5054, + 0x5055, + 0x5056, + 0x5057, + 0x5058, + 0x5245, + 0x5246, + 0x5247, + 0x524b, + 0x524c, + 0x534d, + 0x5446, + 0x544C, + 0x5452, + /* radeon */ + 0x3150, + 0x3151, + 0x3152, + 0x3154, + 0x3155, + 0x3E50, + 0x3E54, + 0x4136, + 0x4137, + 0x4144, + 0x4145, + 0x4146, + 0x4147, + 0x4148, + 0x4149, + 0x414A, + 0x414B, + 0x4150, + 0x4151, + 0x4152, + 0x4153, + 0x4154, + 0x4155, + 0x4156, + 0x4237, + 0x4242, + 0x4336, + 0x4337, + 0x4437, + 0x4966, + 0x4967, + 0x4A48, + 0x4A49, + 0x4A4A, + 0x4A4B, + 0x4A4C, + 0x4A4D, + 0x4A4E, + 0x4A4F, + 0x4A50, + 0x4A54, + 0x4B48, + 0x4B49, + 0x4B4A, + 0x4B4B, + 0x4B4C, + 0x4C57, + 0x4C58, + 0x4C59, + 0x4C5A, + 0x4C64, + 0x4C66, + 0x4C67, + 0x4E44, + 0x4E45, + 0x4E46, + 0x4E47, + 0x4E48, + 0x4E49, + 0x4E4A, + 0x4E4B, + 0x4E50, + 0x4E51, + 0x4E52, + 0x4E53, + 0x4E54, + 0x4E56, + 0x5144, + 0x5145, + 0x5146, + 0x5147, + 0x5148, + 0x514C, + 0x514D, + 0x5157, + 0x5158, + 0x5159, + 0x515A, + 0x515E, + 0x5460, + 0x5462, + 0x5464, + 0x5548, + 0x5549, + 0x554A, + 0x554B, + 0x554C, + 0x554D, + 0x554E, + 0x554F, + 0x5550, + 0x5551, + 0x5552, + 0x5554, + 0x564A, + 0x564B, + 0x564F, + 0x5652, + 0x5653, + 0x5657, + 0x5834, + 0x5835, + 0x5954, + 0x5955, + 0x5974, + 0x5975, + 0x5960, + 0x5961, + 0x5962, + 0x5964, + 0x5965, + 0x5969, + 0x5a41, + 0x5a42, + 0x5a61, + 0x5a62, + 0x5b60, + 0x5b62, + 0x5b63, + 0x5b64, + 0x5b65, + 0x5c61, + 0x5c63, + 0x5d48, + 0x5d49, + 0x5d4a, + 0x5d4c, + 0x5d4d, + 0x5d4e, + 0x5d4f, + 0x5d50, + 0x5d52, + 0x5d57, + 0x5e48, + 0x5e4a, + 0x5e4b, + 0x5e4c, + 0x5e4d, + 0x5e4f, + 0x6700, + 0x6701, + 0x6702, + 0x6703, + 0x6704, + 0x6705, + 0x6706, + 0x6707, + 0x6708, + 0x6709, + 0x6718, + 0x6719, + 0x671c, + 0x671d, + 0x671f, + 0x6720, + 0x6721, + 0x6722, + 0x6723, + 0x6724, + 0x6725, + 0x6726, + 0x6727, + 0x6728, + 0x6729, + 0x6738, + 0x6739, + 0x673e, + 0x6740, + 0x6741, + 0x6742, + 0x6743, + 0x6744, + 0x6745, + 0x6746, + 0x6747, + 0x6748, + 0x6749, + 0x674A, + 0x6750, + 0x6751, + 0x6758, + 0x6759, + 0x675B, + 0x675D, + 0x675F, + 0x6760, + 0x6761, + 0x6762, + 0x6763, + 0x6764, + 0x6765, + 0x6766, + 0x6767, + 0x6768, + 0x6770, + 0x6771, + 0x6772, + 0x6778, + 0x6779, + 0x677B, + 0x6840, + 0x6841, + 0x6842, + 0x6843, + 0x6849, + 0x684C, + 0x6850, + 0x6858, + 0x6859, + 0x6880, + 0x6888, + 0x6889, + 0x688A, + 0x688C, + 0x688D, + 0x6898, + 0x6899, + 0x689b, + 0x689c, + 0x689d, + 0x689e, + 0x68a0, + 0x68a1, + 0x68a8, + 0x68a9, + 0x68b0, + 0x68b8, + 0x68b9, + 0x68ba, + 0x68be, + 0x68bf, + 0x68c0, + 0x68c1, + 0x68c7, + 0x68c8, + 0x68c9, + 0x68d8, + 0x68d9, + 0x68da, + 0x68de, + 0x68e0, + 0x68e1, + 0x68e4, + 0x68e5, + 0x68e8, + 0x68e9, + 0x68f1, + 0x68f2, + 0x68f8, + 0x68f9, + 0x68fa, + 0x68fe, + 0x7100, + 0x7101, + 0x7102, + 0x7103, + 0x7104, + 0x7105, + 0x7106, + 0x7108, + 0x7109, + 0x710A, + 0x710B, + 0x710C, + 0x710E, + 0x710F, + 0x7140, + 0x7141, + 
0x7142, + 0x7143, + 0x7144, + 0x7145, + 0x7146, + 0x7147, + 0x7149, + 0x714A, + 0x714B, + 0x714C, + 0x714D, + 0x714E, + 0x714F, + 0x7151, + 0x7152, + 0x7153, + 0x715E, + 0x715F, + 0x7180, + 0x7181, + 0x7183, + 0x7186, + 0x7187, + 0x7188, + 0x718A, + 0x718B, + 0x718C, + 0x718D, + 0x718F, + 0x7193, + 0x7196, + 0x719B, + 0x719F, + 0x71C0, + 0x71C1, + 0x71C2, + 0x71C3, + 0x71C4, + 0x71C5, + 0x71C6, + 0x71C7, + 0x71CD, + 0x71CE, + 0x71D2, + 0x71D4, + 0x71D5, + 0x71D6, + 0x71DA, + 0x71DE, + 0x7200, + 0x7210, + 0x7211, + 0x7240, + 0x7243, + 0x7244, + 0x7245, + 0x7246, + 0x7247, + 0x7248, + 0x7249, + 0x724A, + 0x724B, + 0x724C, + 0x724D, + 0x724E, + 0x724F, + 0x7280, + 0x7281, + 0x7283, + 0x7284, + 0x7287, + 0x7288, + 0x7289, + 0x728B, + 0x728C, + 0x7290, + 0x7291, + 0x7293, + 0x7297, + 0x7834, + 0x7835, + 0x791e, + 0x791f, + 0x793f, + 0x7941, + 0x7942, + 0x796c, + 0x796d, + 0x796e, + 0x796f, + 0x9400, + 0x9401, + 0x9402, + 0x9403, + 0x9405, + 0x940A, + 0x940B, + 0x940F, + 0x94A0, + 0x94A1, + 0x94A3, + 0x94B1, + 0x94B3, + 0x94B4, + 0x94B5, + 0x94B9, + 0x9440, + 0x9441, + 0x9442, + 0x9443, + 0x9444, + 0x9446, + 0x944A, + 0x944B, + 0x944C, + 0x944E, + 0x9450, + 0x9452, + 0x9456, + 0x945A, + 0x945B, + 0x945E, + 0x9460, + 0x9462, + 0x946A, + 0x946B, + 0x947A, + 0x947B, + 0x9480, + 0x9487, + 0x9488, + 0x9489, + 0x948A, + 0x948F, + 0x9490, + 0x9491, + 0x9495, + 0x9498, + 0x949C, + 0x949E, + 0x949F, + 0x94C0, + 0x94C1, + 0x94C3, + 0x94C4, + 0x94C5, + 0x94C6, + 0x94C7, + 0x94C8, + 0x94C9, + 0x94CB, + 0x94CC, + 0x94CD, + 0x9500, + 0x9501, + 0x9504, + 0x9505, + 0x9506, + 0x9507, + 0x9508, + 0x9509, + 0x950F, + 0x9511, + 0x9515, + 0x9517, + 0x9519, + 0x9540, + 0x9541, + 0x9542, + 0x954E, + 0x954F, + 0x9552, + 0x9553, + 0x9555, + 0x9557, + 0x955f, + 0x9580, + 0x9581, + 0x9583, + 0x9586, + 0x9587, + 0x9588, + 0x9589, + 0x958A, + 0x958B, + 0x958C, + 0x958D, + 0x958E, + 0x958F, + 0x9590, + 0x9591, + 0x9593, + 0x9595, + 0x9596, + 0x9597, + 0x9598, + 0x9599, + 0x959B, + 0x95C0, + 0x95C2, + 0x95C4, + 0x95C5, + 0x95C6, + 0x95C7, + 0x95C9, + 0x95CC, + 0x95CD, + 0x95CE, + 0x95CF, + 0x9610, + 0x9611, + 0x9612, + 0x9613, + 0x9614, + 0x9615, + 0x9616, + 0x9640, + 0x9641, + 0x9642, + 0x9643, + 0x9644, + 0x9645, + 0x9647, + 0x9648, + 0x9649, + 0x964a, + 0x964b, + 0x964c, + 0x964e, + 0x964f, + 0x9710, + 0x9711, + 0x9712, + 0x9713, + 0x9714, + 0x9715, + 0x9802, + 0x9803, + 0x9804, + 0x9805, + 0x9806, + 0x9807, + 0x9808, + 0x9809, + 0x980A, + 0x9900, + 0x9901, + 0x9903, + 0x9904, + 0x9905, + 0x9906, + 0x9907, + 0x9908, + 0x9909, + 0x990A, + 0x990B, + 0x990C, + 0x990D, + 0x990E, + 0x990F, + 0x9910, + 0x9913, + 0x9917, + 0x9918, + 0x9919, + 0x9990, + 0x9991, + 0x9992, + 0x9993, + 0x9994, + 0x9995, + 0x9996, + 0x9997, + 0x9998, + 0x9999, + 0x999A, + 0x999B, + 0x999C, + 0x999D, + 0x99A0, + 0x99A2, + 0x99A4, +}; + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -985,10 +1780,10 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, /* Arcturus */ - {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x738C, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, /* Navi10 */ {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, @@ -996,6 +1791,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, + {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, /* Navi14 */ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, @@ -1004,28 +1800,116 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ - {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, + {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, + {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, + {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, /* Navi12 */ - {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, + {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, + + /* Sienna_Cichlid */ + {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + + /* Van Gogh */ + {0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU}, + + /* Yellow Carp */ + {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, + {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU}, + + /* Navy_Flounder */ + {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + {0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER}, + + /* 
DIMGREY_CAVEFISH */ + {0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, + + /* Aldebaran */ + {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + + /* CYAN_SKILLFISH */ + {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, + + /* BEIGE_GOBY */ + {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + + { PCI_DEVICE(0x1002, PCI_ANY_ID), + .class = PCI_CLASS_DISPLAY_VGA << 8, + .class_mask = 0xffffff, + .driver_data = CHIP_IP_DISCOVERY }, + + { PCI_DEVICE(0x1002, PCI_ANY_ID), + .class = PCI_CLASS_DISPLAY_OTHER << 8, + .class_mask = 0xffffff, + .driver_data = CHIP_IP_DISCOVERY }, {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); -static struct drm_driver kms_driver; +static const struct drm_driver amdgpu_kms_driver; static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct drm_device *dev; + struct drm_device *ddev; + struct amdgpu_device *adev; unsigned long flags = ent->driver_data; - int ret, retry = 0; + int ret, retry = 0, i; bool supports_atomic = false; - if (!amdgpu_virtual_display && + /* skip devices which are owned by radeon */ + for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { + if (amdgpu_unsupported_pciidlist[i] == pdev->device) + return -ENODEV; + } + + if (flags == 0) { + DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n"); + return -ENODEV; + } + + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -1035,6 +1919,17 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } + /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, + * however, SME requires an indirect IOMMU mapping because the encryption + * bit is beyond the DMA mask of the chip. 
+ */ + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) && + ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) { + dev_info(&pdev->dev, + "SME is not compatible with RAVEN\n"); + return -ENOTSUPP; + } + #ifdef CONFIG_DRM_AMDGPU_SI if (!amdgpu_si_support) { switch (flags & AMD_ASIC_MASK) { @@ -1071,41 +1966,50 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, #endif /* Get rid of things like offb */ - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) return ret; - dev = drm_dev_alloc(&kms_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); + if (IS_ERR(adev)) + return PTR_ERR(adev); + + adev->dev = &pdev->dev; + adev->pdev = pdev; + ddev = adev_to_drm(adev); if (!supports_atomic) - dev->driver_features &= ~DRIVER_ATOMIC; + ddev->driver_features &= ~DRIVER_ATOMIC; ret = pci_enable_device(pdev); if (ret) - goto err_free; + return ret; - dev->pdev = pdev; + pci_set_drvdata(pdev, ddev); - pci_set_drvdata(pdev, dev); + ret = amdgpu_driver_load_kms(adev, ent->driver_data); + if (ret) + goto err_pci; retry_init: - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(ddev, ent->driver_data); if (ret == -EAGAIN && ++retry <= 3) { DRM_INFO("retry init %d\n", retry); /* Don't request EX mode too frequently which is attacking */ msleep(5000); goto retry_init; - } else if (ret) + } else if (ret) { goto err_pci; + } + + ret = amdgpu_debugfs_init(adev); + if (ret) + DRM_ERROR("Creating debugfs files failed (%d).\n", ret); return 0; err_pci: pci_disable_device(pdev); -err_free: - drm_dev_put(dev); return ret; } @@ -1114,21 +2018,23 @@ amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); -#ifdef MODULE - if (THIS_MODULE->state != MODULE_STATE_GOING) -#endif - DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); - drm_dev_put(dev); + amdgpu_driver_unload_kms(dev); + + /* + * Flush any in flight DMA operations from device. + * Clear the Bus Master Enable bit and then wait on the PCIe Device + * StatusTransactions Pending bit. + */ pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + pci_wait_for_pending_transaction(pdev); } static void amdgpu_pci_shutdown(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (amdgpu_ras_intr_triggered()) return; @@ -1138,39 +2044,144 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) * unfortunately we can't detect certain * hypervisors so just do this all the time. */ - adev->mp1_state = PP_MP1_STATE_UNLOAD; + if (!amdgpu_passthrough(adev)) + adev->mp1_state = PP_MP1_STATE_UNLOAD; amdgpu_device_ip_suspend(adev); adev->mp1_state = PP_MP1_STATE_NONE; } +/** + * amdgpu_drv_delayed_reset_work_handler - work handler for reset + * + * @work: work_struct. 
+ */ +static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) +{ + struct list_head device_list; + struct amdgpu_device *adev; + int i, r; + struct amdgpu_reset_context reset_context; + + memset(&reset_context, 0, sizeof(reset_context)); + + mutex_lock(&mgpu_info.mutex); + if (mgpu_info.pending_reset == true) { + mutex_unlock(&mgpu_info.mutex); + return; + } + mgpu_info.pending_reset = true; + mutex_unlock(&mgpu_info.mutex); + + /* Use a common context, just need to make sure full reset is done */ + reset_context.method = AMD_RESET_METHOD_NONE; + set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + + for (i = 0; i < mgpu_info.num_dgpu; i++) { + adev = mgpu_info.gpu_ins[i].adev; + reset_context.reset_req_dev = adev; + r = amdgpu_device_pre_asic_reset(adev, &reset_context); + if (r) { + dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", + r, adev_to_drm(adev)->unique); + } + if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work)) + r = -EALREADY; + } + for (i = 0; i < mgpu_info.num_dgpu; i++) { + adev = mgpu_info.gpu_ins[i].adev; + flush_work(&adev->xgmi_reset_work); + adev->gmc.xgmi.pending_reset = false; + } + + /* reset function will rebuild the xgmi hive info , clear it now */ + for (i = 0; i < mgpu_info.num_dgpu; i++) + amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev); + + INIT_LIST_HEAD(&device_list); + + for (i = 0; i < mgpu_info.num_dgpu; i++) + list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list); + + /* unregister the GPU first, reset function will add them back */ + list_for_each_entry(adev, &device_list, reset_list) + amdgpu_unregister_gpu_instance(adev); + + /* Use a common context, just need to make sure full reset is done */ + set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + r = amdgpu_do_asic_reset(&device_list, &reset_context); + + if (r) { + DRM_ERROR("reinit gpus failure"); + return; + } + for (i = 0; i < mgpu_info.num_dgpu; i++) { + adev = mgpu_info.gpu_ins[i].adev; + if (!adev->kfd.init_complete) + amdgpu_amdkfd_device_init(adev); + amdgpu_ttm_set_buffer_funcs_status(adev, true); + } + return; +} + +static int amdgpu_pmops_prepare(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + /* Return a positive number here so + * DPM_FLAG_SMART_SUSPEND works properly + */ + if (amdgpu_device_supports_boco(drm_dev)) + return pm_runtime_suspended(dev) && + pm_suspend_via_firmware(); + + return 0; +} + +static void amdgpu_pmops_complete(struct device *dev) +{ + /* nothing to do */ +} + static int amdgpu_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_suspend(drm_dev, true, true); + if (amdgpu_acpi_is_s0ix_active(adev)) + adev->in_s0ix = true; + adev->in_s3 = true; + r = amdgpu_device_suspend(drm_dev, true); + adev->in_s3 = false; + + return r; } static int amdgpu_pmops_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - /* GPU comes up enabled by the bios on resume */ - if (amdgpu_device_is_px(drm_dev)) { - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } + /* Avoids registers access if device is physically gone */ + if (!pci_device_is_present(adev->pdev)) + adev->no_hw_access = true; - return amdgpu_device_resume(drm_dev, true, true); + r = amdgpu_device_resume(drm_dev, true); + if (amdgpu_acpi_is_s0ix_active(adev)) + 
adev->in_s0ix = false; + return r; } static int amdgpu_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; - r = amdgpu_device_suspend(drm_dev, false, true); + adev->in_s4 = true; + r = amdgpu_device_suspend(drm_dev, true); + adev->in_s4 = false; if (r) return r; return amdgpu_asic_reset(adev); @@ -1180,46 +2191,69 @@ static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_suspend(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int ret, i; - if (!amdgpu_device_is_px(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - drm_kms_helper_poll_disable(drm_dev); + /* wait for all rings to drain before suspending */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) { + ret = amdgpu_fence_wait_empty(ring); + if (ret) + return -EBUSY; + } + } - ret = amdgpu_device_suspend(drm_dev, false, false); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_ignore_hotplug(pdev); - if (amdgpu_is_atpx_hybrid()) + adev->in_runpm = true; + if (amdgpu_device_supports_px(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + + ret = amdgpu_device_suspend(drm_dev, false); + if (ret) { + adev->in_runpm = false; + return ret; + } + + if (amdgpu_device_supports_px(drm_dev)) { + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. 
+ */ + amdgpu_device_cache_pci_state(pdev); + pci_disable_device(pdev); + pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3cold); - else if (!amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D3hot); - drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + } else if (amdgpu_device_supports_boco(drm_dev)) { + /* nothing to do */ + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_enter(drm_dev); + } return 0; } @@ -1228,49 +2262,97 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); int ret; - if (!amdgpu_device_is_px(drm_dev)) + if (!adev->runpm) return -EINVAL; - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* Avoids registers access if device is physically gone */ + if (!pci_device_is_present(adev->pdev)) + adev->no_hw_access = true; + + if (amdgpu_device_supports_px(drm_dev)) { + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (amdgpu_is_atpx_hybrid() || - !amdgpu_has_atpx_dgpu_power_cntl()) + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. + */ pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); + amdgpu_device_load_pci_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + } else if (amdgpu_device_supports_boco(drm_dev)) { + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. + */ + pci_set_master(pdev); + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_exit(drm_dev); + } + ret = amdgpu_device_resume(drm_dev, false); if (ret) return ret; - pci_set_master(pdev); - ret = amdgpu_device_resume(drm_dev, false, false); - drm_kms_helper_poll_enable(drm_dev); - drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + if (amdgpu_device_supports_px(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + adev->in_runpm = false; return 0; } static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct amdgpu_device *adev = drm_to_adev(drm_dev); + /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ + int ret = 1; - if (!amdgpu_device_is_px(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; + if (amdgpu_device_has_dc_support(adev)) { + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, drm_dev) { + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state->active) + ret = -EBUSY; + drm_modeset_unlock(&crtc->mutex); + if (ret < 0) + break; + } + + } else { + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + + mutex_lock(&drm_dev->mode_config.mutex); + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); + + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->dpms == DRM_MODE_DPMS_ON) { + ret = -EBUSY; + break; + } } + + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); + mutex_unlock(&drm_dev->mode_config.mutex); } + if (ret == -EBUSY) + 
DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); - /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ - return 1; + return ret; } long amdgpu_drm_ioctl(struct file *filp, @@ -1282,16 +2364,19 @@ long amdgpu_drm_ioctl(struct file *filp, dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_ioctl(filp, cmd, arg); pm_runtime_mark_last_busy(dev->dev); +out: pm_runtime_put_autosuspend(dev->dev); return ret; } static const struct dev_pm_ops amdgpu_pm_ops = { + .prepare = amdgpu_pmops_prepare, + .complete = amdgpu_pmops_complete, .suspend = amdgpu_pmops_suspend, .resume = amdgpu_pmops_resume, .freeze = amdgpu_pmops_freeze, @@ -1321,17 +2406,20 @@ static const struct file_operations amdgpu_driver_kms_fops = { .flush = amdgpu_flush, .release = drm_release, .unlocked_ioctl = amdgpu_drm_ioctl, - .mmap = amdgpu_mmap, + .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = amdgpu_kms_compat_ioctl, #endif +#ifdef CONFIG_PROC_FS + .show_fdinfo = amdgpu_show_fdinfo +#endif }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) { - struct drm_file *file; + struct drm_file *file; if (!filp) return -EINVAL; @@ -1345,47 +2433,46 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -static bool -amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, - stime, etime, mode); -} +const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + /* KMS */ + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), +}; -static struct drm_driver kms_driver = { +static const struct drm_driver amdgpu_kms_driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_ATOMIC | + DRIVER_ATOMIC | DRIVER_GEM | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, - .load = amdgpu_driver_load_kms, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, .open = 
amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, .lastclose = amdgpu_driver_lastclose_kms, - .unload = amdgpu_driver_unload_kms, - .get_vblank_counter = amdgpu_get_vblank_counter_kms, - .enable_vblank = amdgpu_enable_vblank_kms, - .disable_vblank = amdgpu_disable_vblank_kms, - .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, - .get_scanout_position = amdgpu_get_crtc_scanout_position, - .irq_handler = amdgpu_irq_handler, .ioctls = amdgpu_ioctls_kms, - .gem_free_object_unlocked = amdgpu_gem_object_free, - .gem_open_object = amdgpu_gem_object_open, - .gem_close_object = amdgpu_gem_object_close, + .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, .fops = &amdgpu_driver_kms_fops, + .release = &amdgpu_driver_release_kms, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = amdgpu_gem_prime_export, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_vmap = amdgpu_gem_prime_vmap, - .gem_prime_vunmap = amdgpu_gem_prime_vunmap, - .gem_prime_mmap = amdgpu_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -1395,6 +2482,25 @@ static struct drm_driver kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; +static struct pci_error_handlers amdgpu_pci_err_handler = { + .error_detected = amdgpu_pci_error_detected, + .mmio_enabled = amdgpu_pci_mmio_enabled, + .slot_reset = amdgpu_pci_slot_reset, + .resume = amdgpu_pci_resume, +}; + +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; +extern const struct attribute_group amdgpu_vbios_version_attr_group; + +static const struct attribute_group *amdgpu_sysfs_groups[] = { + &amdgpu_vram_mgr_attr_group, + &amdgpu_gtt_mgr_attr_group, + &amdgpu_vbios_version_attr_group, + NULL, +}; + + static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -1402,10 +2508,10 @@ static struct pci_driver amdgpu_kms_pci_driver = { .remove = amdgpu_pci_remove, .shutdown = amdgpu_pci_shutdown, .driver.pm = &amdgpu_pm_ops, + .err_handler = &amdgpu_pci_err_handler, + .dev_groups = amdgpu_sysfs_groups, }; - - static int __init amdgpu_init(void) { int r; @@ -1424,8 +2530,8 @@ static int __init amdgpu_init(void) goto error_fence; DRM_INFO("amdgpu kernel modesetting enabled.\n"); - kms_driver.num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); + amdgpu_acpi_detect(); /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c new file mode 100644 index 000000000000..4d9eb0137f8c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c @@ -0,0 +1,239 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_eeprom.h"
+#include "amdgpu.h"
+
+/* AT24CM02 and M24M02-R have a 256-byte write page size.
+ */
+#define EEPROM_PAGE_BITS   8
+#define EEPROM_PAGE_SIZE   (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK   (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+/* EEPROM memory addresses are 19 bits long, partitioned into
+ * 3, 8 and 8 bits. The upper 3 bits are sent as part of the 7-bit
+ * "Device Type Identifier"--an I2C concept, which for EEPROM devices
+ * is hard-coded as 1010b, indicating an EEPROM device. On the wire,
+ * this is followed by the upper 3 bits of the 19-bit address, then the
+ * direction bit, then two bytes holding the remaining 16 bits of the
+ * EEPROM memory address. The format on the wire for EEPROM devices is:
+ * 1010XYZD, A15:A8, A7:A0,
+ * where D is the direction bit, sequenced out by the hardware.
+ * Bits XYZ are memory address bits 18, 17 and 16.
+ * These bits are compared to how pins 1-3 of the part are connected,
+ * depending on the size of the part; more on that below.
+ *
+ * Note that of this wire format, a client controls, and needs to
+ * specify, only the XYZ, A15:A8, A7:A0 bits--which is exactly the
+ * EEPROM memory address, or offset--in order to address up to 8 EEPROM
+ * devices on the I2C bus.
+ *
+ * For instance, a 2-Mbit I2C EEPROM part addresses all its bytes using
+ * an 18-bit address (bits 17 to 0), and thus uses all but one of the
+ * 19 bits mentioned above. The designer would then leave pins 1 and 2
+ * unconnected, and pin 3, usually named "A_2" or "E2", would be
+ * connected to either Vcc or GND. This allows for up to two 2-Mbit
+ * parts on the same bus, where one would be addressable with bit 18 of
+ * the address as 1, and the other with bit 18 as 0.
+ *
+ * For a 2-Mbit part, bit 18 is usually known as the "Chip Enable" or
+ * "Hardware Address Bit". This bit is compared to the load on pin 3
+ * of the device, described above, and if there is a match, then this
+ * device responds to the command. This way, you can connect two
+ * 2-Mbit EEPROM devices on the same bus and see one contiguous
+ * memory from 0 to 7FFFFh, where addresses 0 to 3FFFFh are in the
+ * device whose pin 3 is connected to GND, and addresses 40000h to
+ * 7FFFFh are in the 2nd device, whose pin 3 is connected to Vcc.
+ *
+ * This addressing is encoded in the 32-bit "eeprom_addr" below, namely
+ * the 19 bits "XYZ,A15:A0", as a single 19-bit address. For instance,
+ * eeprom_addr = 0x6DA01 is 110_1101_1010_0000_0001b, where XYZ = 110b
+ * and A15:A0 = DA01h. The XYZ bits become part of the device address,
+ * and the rest of the address bits are sent as the memory address
+ * bytes.
+ *
+ * That is, for an I2C EEPROM driver everything is controlled by
+ * the "eeprom_addr".
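The worked example above (eeprom_addr = 0x6DA01) can be verified mechanically; a standalone sketch using the same MAKE_I2C_ADDR macro that is defined just below:

#include <assert.h>
#include <stdint.h>

#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))

/* Sketch: for eeprom_addr = 0x6DA01 (XYZ = 110b, A15:A0 = DA01h) the
 * 7-bit device address is 1010_110b = 0x56, and the two offset bytes
 * sent on the wire are 0xDA and 0x01.
 */
int main(void)
{
	uint32_t eeprom_addr = 0x6DA01;

	assert(MAKE_I2C_ADDR(eeprom_addr) == 0x56);
	assert(((eeprom_addr >> 8) & 0xff) == 0xDA);
	assert((eeprom_addr & 0xff) == 0x01);
	return 0;
}

+ *
+ * P.S.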
If you need to write, lock and read the Identification Page, + * (M24M02-DR device only, which we do not use), change the "7" to + * "0xF" in the macro below, and let the client set bit 20 to 1 in + * "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to + * 1 to lock it permanently. + */ +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7)) + +static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, + u8 *eeprom_buf, u16 buf_size, bool read) +{ + u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE]; + struct i2c_msg msgs[] = { + { + .flags = 0, + .len = EEPROM_OFFSET_SIZE, + .buf = eeprom_offset_buf, + }, + { + .flags = read ? I2C_M_RD : 0, + }, + }; + const u8 *p = eeprom_buf; + int r; + u16 len; + + for (r = 0; buf_size > 0; + buf_size -= len, eeprom_addr += len, eeprom_buf += len) { + /* Set the EEPROM address we want to write to/read from. + */ + msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr); + msgs[1].addr = msgs[0].addr; + msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff; + msgs[0].buf[1] = eeprom_addr & 0xff; + + if (!read) { + /* Write the maximum amount of data, without + * crossing the device's page boundary, as per + * its spec. Partial page writes are allowed, + * starting at any location within the page, + * so long as the page boundary isn't crossed + * over (actually the page pointer rolls + * over). + * + * As per the AT24CM02 EEPROM spec, after + * writing into a page, the I2C driver should + * terminate the transfer, i.e. in + * "i2c_transfer()" below, with a STOP + * condition, so that the self-timed write + * cycle begins. This is implied for the + * "i2c_transfer()" abstraction. + */ + len = min(EEPROM_PAGE_SIZE - (eeprom_addr & + EEPROM_PAGE_MASK), + (u32)buf_size); + } else { + /* Reading from the EEPROM has no limitation + * on the number of bytes read from the EEPROM + * device--they are simply sequenced out. + */ + len = buf_size; + } + msgs[1].len = len; + msgs[1].buf = eeprom_buf; + + /* This constitutes a START-STOP transaction. + */ + r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs)); + if (r != ARRAY_SIZE(msgs)) + break; + + if (!read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + } + + return r < 0 ? r : eeprom_buf - p; +} + +/** + * amdgpu_eeprom_xfer -- Read/write from/to an I2C EEPROM device + * @i2c_adap: pointer to the I2C adapter to use + * @eeprom_addr: EEPROM address from which to read/write + * @eeprom_buf: pointer to data buffer to read into/write from + * @buf_size: the size of @eeprom_buf + * @read: True if reading from the EEPROM, false if writing + * + * Returns the number of bytes read/written; -errno on error. + */ +static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr, + u8 *eeprom_buf, u16 buf_size, bool read) +{ + const struct i2c_adapter_quirks *quirks = i2c_adap->quirks; + u16 limit; + + if (!quirks) + limit = 0; + else if (read) + limit = quirks->max_read_len; + else + limit = quirks->max_write_len; + + if (limit == 0) { + return __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, + eeprom_buf, buf_size, read); + } else if (limit <= EEPROM_OFFSET_SIZE) { + dev_err_ratelimited(&i2c_adap->dev, + "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", + eeprom_addr, buf_size, + read ? 
"read" : "write", EEPROM_OFFSET_SIZE); + return -EINVAL; + } else { + u16 ps; /* Partial size */ + int res = 0, r; + + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = min(limit, buf_size); + + r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + return r; + res += r; + } + + return res; + } +} + +int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, + u16 bytes) +{ + return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes, + true); +} + +int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, + u16 bytes) +{ + return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes, + false); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h new file mode 100644 index 000000000000..6935adb2be1f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h @@ -0,0 +1,37 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _AMDGPU_EEPROM_H +#define _AMDGPU_EEPROM_H + +#include <linux/i2c.h> + +int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, + u16 bytes); + +int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap, + u32 eeprom_addr, u8 *eeprom_buf, + u16 bytes); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 61fcf247a638..af4ef84e27a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -35,7 +35,7 @@ void amdgpu_link_encoder_connector(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_connector *connector; struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 143753d237e7..cd0acbea75da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -69,7 +69,7 @@ amdgpufb_release(struct fb_info *info, int user) return 0; } -static struct fb_ops amdgpufb_ops = { +static const struct fb_ops amdgpufb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_open = amdgpufb_open, @@ -114,7 +114,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) amdgpu_bo_unpin(abo); amdgpu_bo_unreserve(abo); } - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); } static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, @@ -133,10 +133,9 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, u32 cpp; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_VRAM_CLEARED; - info = drm_get_format_info(adev->ddev, mode_cmd); + info = drm_get_format_info(adev_to_drm(adev), mode_cmd); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -147,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_kernel, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; @@ -206,8 +205,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_gem_object *gobj = NULL; struct amdgpu_bo *abo = NULL; int ret; - unsigned long tmp; + memset(&mode_cmd, 0, sizeof(mode_cmd)); mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; @@ -232,8 +231,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper, goto out; } - ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb, - &mode_cmd, gobj); + ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb, + &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto out; @@ -246,8 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, info->fbops = &amdgpufb_ops; - tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start; - info->fix.smem_start = adev->gmc.aper_base + tmp; + info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo); info->fix.smem_len = amdgpu_bo_size(abo); info->screen_base = amdgpu_bo_kptr(abo); info->screen_size = amdgpu_bo_size(abo); @@ -255,7 +253,7 @@ static int amdgpufb_create(struct drm_fb_helper 
*helper, drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); /* setup aperture base/size for vesafb takeover */ - info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; + info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; info->apertures->ranges[0].size = adev->gmc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -271,15 +269,12 @@ static int amdgpufb_create(struct drm_fb_helper *helper, DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); - vga_switcheroo_client_fb_set(adev->ddev->pdev, info); + vga_switcheroo_client_fb_set(adev->pdev, info); return 0; out: - if (abo) { - - } if (fb && ret) { - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); @@ -290,10 +285,13 @@ out: static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) { struct amdgpu_framebuffer *rfb = &rfbdev->rfb; + int i; drm_fb_helper_unregister_fbi(&rfbdev->helper); if (rfb->base.obj[0]) { + for (i = 0; i < rfb->base.format->num_planes; i++) + drm_gem_object_put(rfb->base.obj[0]); amdgpufb_destroy_pinned_object(rfb->base.obj[0]); rfb->base.obj[0] = NULL; drm_framebuffer_unregister_private(&rfb->base); @@ -319,7 +317,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) return 0; /* don't init fbdev if there are no connectors */ - if (list_empty(&adev->ddev->mode_config.connector_list)) + if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) return 0; /* select 8 bpp console on low vram cards */ @@ -333,21 +331,18 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) rfbdev->adev = adev; adev->mode_info.rfbdev = rfbdev; - drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, - &amdgpu_fb_helper_funcs); + drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, + &amdgpu_fb_helper_funcs); - ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper, - AMDGPUFB_CONN_LIMIT); + ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); if (ret) { kfree(rfbdev); return ret; } - drm_fb_helper_single_add_all_connectors(&rfbdev->helper); - /* disable all the possible outputs/crtcs before entering KMS mode */ - if (!amdgpu_device_has_dc_support(adev)) - drm_helper_disable_unused_functions(adev->ddev); + if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) + drm_helper_disable_unused_functions(adev_to_drm(adev)); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; @@ -358,7 +353,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev) if (!adev->mode_info.rfbdev) return; - amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev); + amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); kfree(adev->mode_info.rfbdev); adev->mode_info.rfbdev = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c new file mode 100644 index 000000000000..5a6857c44bb6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +/* Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: David Nieto + * Roy Sun + */ + +#include <linux/debugfs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/reboot.h> +#include <linux/syscalls.h> + +#include <drm/amdgpu_drm.h> +#include <drm/drm_debugfs.h> + +#include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_gem.h" +#include "amdgpu_ctx.h" +#include "amdgpu_fdinfo.h" + + +static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = { + [AMDGPU_HW_IP_GFX] = "gfx", + [AMDGPU_HW_IP_COMPUTE] = "compute", + [AMDGPU_HW_IP_DMA] = "dma", + [AMDGPU_HW_IP_UVD] = "dec", + [AMDGPU_HW_IP_VCE] = "enc", + [AMDGPU_HW_IP_UVD_ENC] = "enc_1", + [AMDGPU_HW_IP_VCN_DEC] = "dec", + [AMDGPU_HW_IP_VCN_ENC] = "enc", + [AMDGPU_HW_IP_VCN_JPEG] = "jpeg", +}; + +void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) +{ + struct amdgpu_fpriv *fpriv; + uint32_t bus, dev, fn, i, domain; + uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0; + struct drm_file *file = f->private_data; + struct amdgpu_device *adev = drm_to_adev(file->minor->dev); + struct amdgpu_bo *root; + int ret; + + ret = amdgpu_file_to_fpriv(f, &fpriv); + if (ret) + return; + bus = adev->pdev->bus->number; + domain = pci_domain_nr(adev->pdev->bus); + dev = PCI_SLOT(adev->pdev->devfn); + fn = PCI_FUNC(adev->pdev->devfn); + + root = amdgpu_bo_ref(fpriv->vm.root.bo); + if (!root) + return; + + ret = amdgpu_bo_reserve(root, false); + if (ret) { + DRM_ERROR("Fail to reserve bo\n"); + return; + } + amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, >t_mem, &cpu_mem); + amdgpu_bo_unreserve(root); + amdgpu_bo_unref(&root); + + seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus, + dev, fn, fpriv->vm.pasid); + seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL); + seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL); + seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL); + for (i = 0; i < AMDGPU_HW_IP_NUM; i++) { + uint32_t count = amdgpu_ctx_num_entities[i]; + int idx = 0; + uint64_t total = 0, min = 0; + uint32_t perc, frac; + + for (idx = 0; idx < count; idx++) { + total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr, + i, idx, &min); + if ((total == 0) || (min == 0)) + continue; + + perc = div64_u64(10000 * total, min); + frac = perc % 100; + + seq_printf(m, "%s%d:\t%d.%d%%\n", + amdgpu_ip_name[i], + idx, perc/100, frac); + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h new file mode 
100644 index 000000000000..41a4c7056729 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: David Nieto + * Roy Sun + */ +#ifndef __AMDGPU_SMI_H__ +#define __AMDGPU_SMI_H__ + +#include <linux/idr.h> +#include <linux/kfifo.h> +#include <linux/rbtree.h> +#include <drm/gpu_scheduler.h> +#include <drm/drm_file.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <linux/sched/mm.h> + +#include "amdgpu_sync.h" +#include "amdgpu_ring.h" +#include "amdgpu_ids.h" + +uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id); +void amdgpu_show_fdinfo(struct seq_file *m, struct file *f); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 377fe20bce23..3b7e86ea7167 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -34,9 +34,9 @@ #include <linux/kref.h> #include <linux/slab.h> #include <linux/firmware.h> +#include <linux/pm_runtime.h> -#include <drm/drm_debugfs.h> - +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_trace.h" @@ -129,32 +129,53 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * * @ring: ring the fence is associated with * @f: resulting fence object + * @job: job the fence is embedded in + * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. 
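A note on the usage arithmetic in amdgpu_show_fdinfo() above: the per-engine ratio is computed in hundredths of a percent, so two decimal places come out of pure integer math with no floating point in the kernel. A standalone sketch of the same computation, with invented values:

    /* Fixed-point percentage as in amdgpu_show_fdinfo() above. */
    #include <stdint.h>
    #include <stdio.h>

    static void show_usage(const char *name, uint64_t total, uint64_t min)
    {
        uint32_t perc = (uint32_t)(10000 * total / min); /* hundredths of a percent */
        uint32_t frac = perc % 100;

        printf("%s:\t%u.%u%%\n", name, perc / 100, frac);
    }

    int main(void)
    {
        show_usage("gfx0", 333, 1000); /* prints "gfx0: 33.30%" */
        return 0;
    }

One caveat: because the fractional part is printed as a plain integer, values below 10 lose their leading zero (3305 hundredths prints as "33.5%" rather than "33.05%").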
*/ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, unsigned flags) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_fence *fence; + struct dma_fence *fence; + struct amdgpu_fence *am_fence; struct dma_fence __rcu **ptr; uint32_t seq; int r; - fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); - if (fence == NULL) - return -ENOMEM; + if (job == NULL) { + /* create a separate hw fence */ + am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC); + if (am_fence == NULL) + return -ENOMEM; + fence = &am_fence->base; + am_fence->ring = ring; + } else { + /* make use of the job-embedded fence */ + fence = &job->hw_fence; + } seq = ++ring->fence_drv.sync_seq; - fence->ring = ring; - dma_fence_init(&fence->base, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, - seq); + if (job != NULL && job->job_run_counter) { + /* reinit seq for resubmitted jobs */ + fence->seqno = seq; + } else { + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, + seq); + } + + if (job != NULL) { + /* mark this fence as having a parent job */ + set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags); + } + amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - + pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -174,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. */ - rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); + rcu_assign_pointer(*ptr, dma_fence_get(fence)); - *f = &fence->base; + *f = fence; return 0; } @@ -186,19 +207,28 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, * * @ring: ring the fence is associated with * @s: resulting sequence number + * @timeout: the timeout for waiting in usecs * * Emits a fence command on the requested ring (all asics). * Used For polling fence. * Returns 0 on success, -ENOMEM on failure.
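The hunk above gives the hardware fence two possible homes: a standalone allocation from amdgpu_fence_slab, or the hw_fence field embedded in the submitting job, with AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT recording which. Every consumer that needs the ring back has to branch on that bit; the pattern, condensed from the timeline-name and signaling hunks later in this file:

    /* Condensed from the later hunks: recover the ring from either
     * flavour of hardware fence.
     */
    static struct amdgpu_ring *fence_to_ring(struct dma_fence *f)
    {
        if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
            struct amdgpu_job *job =
                container_of(f, struct amdgpu_job, hw_fence);

            return to_amdgpu_ring(job->base.sched);
        }
        return to_amdgpu_fence(f)->ring;
    }

Note also that resubmitted jobs (job_run_counter != 0) skip dma_fence_init() and only refresh the sequence number, since the embedded fence was already initialized on first submission.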
*/ -int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s) +int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, + uint32_t timeout) { uint32_t seq; + signed long r; if (!s) return -EINVAL; seq = ++ring->fence_drv.sync_seq; + r = amdgpu_fence_wait_polling(ring, + seq - ring->fence_drv.num_fences_mask, + timeout); + if (r < 1) + return -ETIMEDOUT; + amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, 0); @@ -234,8 +264,8 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) bool amdgpu_fence_process(struct amdgpu_ring *ring) { struct amdgpu_fence_driver *drv = &ring->fence_drv; + struct amdgpu_device *adev = ring->adev; uint32_t seq, last_seq; - int r; do { last_seq = atomic_read(&ring->fence_drv.last_seq); @@ -267,13 +297,10 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; - r = dma_fence_signal(fence); - if (!r) - DMA_FENCE_TRACE(fence, "signaled from irq context\n"); - else - BUG(); - + dma_fence_signal(fence); dma_fence_put(fence); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } while (last_seq != seq); return true; @@ -282,7 +309,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) /** * amdgpu_fence_fallback - fallback for hardware interrupts * - * @work: delayed work item + * @t: timer context used to obtain the pointer to ring structure * * Checks for fence activity. */ @@ -298,7 +325,6 @@ static void amdgpu_fence_fallback(struct timer_list *t) /** * amdgpu_fence_wait_empty - wait for all fences to signal * - * @adev: amdgpu device pointer * @ring: ring index the fence is associated with * * Wait for all fences on the requested ring to signal (all asics). @@ -404,15 +430,13 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); - amdgpu_irq_get(adev, irq_src, irq_type); ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; ring->fence_drv.initialized = true; - DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr " - "0x%016llx, cpu addr 0x%p\n", ring->name, - ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); + DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n", + ring->name, ring->fence_drv.gpu_addr); return 0; } @@ -422,12 +446,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, * * @ring: ring to init the fence driver on * @num_hw_submission: number of entries on the hardware queue + * @sched_score: optional score atomic shared with other schedulers * * Init the fence driver for the requested ring (all asics). * Helper function for amdgpu_fence_driver_init(). 
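The new timeout parameter above exists because amdgpu_fence_emit_polling() now throttles itself: before emitting sequence number seq it waits for fence seq - num_fences_mask to retire, so no more fences than the driver tracks are ever in flight, and it fails with -ETIMEDOUT rather than overwriting a live slot. The room check reduces to wraparound-safe signed sequence arithmetic; a toy model, with the mask value assumed for illustration:

    /* Toy model of the sliding-window throttle above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_FENCES_MASK 0xffu	/* example value only */

    static bool ring_has_room(uint32_t next_seq, uint32_t signaled_seq)
    {
        /* true once fence (next_seq - mask) has signaled; the signed
         * difference keeps working across 32-bit wraparound
         */
        return (int32_t)(signaled_seq - (next_seq - NUM_FENCES_MASK)) >= 0;
    }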
*/ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, - unsigned num_hw_submission) + unsigned num_hw_submission, + atomic_t *sched_score) { struct amdgpu_device *adev = ring->adev; long timeout; @@ -436,8 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, if (!adev) return -EINVAL; - /* Check that num_hw_submission is a power of two */ - if ((num_hw_submission & (num_hw_submission - 1)) != 0) + if (!is_power_of_2(num_hw_submission)) return -EINVAL; ring->fence_drv.cpu_addr = NULL; @@ -455,38 +480,39 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, if (!ring->fence_drv.fences) return -ENOMEM; - /* No need to setup the GPU scheduler for KIQ ring */ - if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) { - switch (ring->funcs->type) { - case AMDGPU_RING_TYPE_GFX: - timeout = adev->gfx_timeout; - break; - case AMDGPU_RING_TYPE_COMPUTE: - timeout = adev->compute_timeout; - break; - case AMDGPU_RING_TYPE_SDMA: - timeout = adev->sdma_timeout; - break; - default: - timeout = adev->video_timeout; - break; - } + /* No need to setup the GPU scheduler for rings that don't need it */ + if (ring->no_scheduler) + return 0; - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, - num_hw_submission, amdgpu_job_hang_limit, - timeout, ring->name); - if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); - return r; - } + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + timeout = adev->gfx_timeout; + break; + case AMDGPU_RING_TYPE_COMPUTE: + timeout = adev->compute_timeout; + break; + case AMDGPU_RING_TYPE_SDMA: + timeout = adev->sdma_timeout; + break; + default: + timeout = adev->video_timeout; + break; + } + + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, + num_hw_submission, amdgpu_job_hang_limit, + timeout, NULL, sched_score, ring->name); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; } return 0; } /** - * amdgpu_fence_driver_init - init the fence driver + * amdgpu_fence_driver_sw_init - init the fence driver * for all possible rings. * * @adev: amdgpu device pointer @@ -497,92 +523,83 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, * amdgpu_fence_driver_start_ring(). * Returns 0 for success. */ -int amdgpu_fence_driver_init(struct amdgpu_device *adev) +int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev) { - if (amdgpu_debugfs_fence_init(adev)) - dev_err(adev->dev, "fence debugfs file creation failed\n"); - return 0; } /** - * amdgpu_fence_driver_fini - tear down the fence driver + * amdgpu_fence_driver_hw_fini - tear down the fence driver * for all possible rings. * * @adev: amdgpu device pointer * * Tear down the fence driver for all possible rings (all asics). 
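The is_power_of_2() check above is load-bearing rather than a tidier spelling: fence slots are indexed with seq & num_fences_mask (see amdgpu_fence_emit() earlier), and a bitmask of length - 1 only behaves like a modulo when the table length is a power of two. For instance:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mask = 256 - 1;	/* 256-slot table, example size */

        assert((255u & mask) == 255);	/* last slot */
        assert((256u & mask) == 0);	/* wraps cleanly back to slot 0 */
        /* with a non-power-of-two length, & and % diverge and slots
         * would be skipped or reused out of order
         */
        return 0;
    }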
*/ -void amdgpu_fence_driver_fini(struct amdgpu_device *adev) +void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) { - unsigned i, j; - int r; + int i, r; for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->fence_drv.initialized) continue; - r = amdgpu_fence_wait_empty(ring); - if (r) { - /* no need to trigger GPU reset as we are unloading */ + + if (!ring->no_scheduler) + drm_sched_stop(&ring->sched, NULL); + + /* You can't wait for HW to signal if it's gone */ + if (!drm_dev_is_unplugged(adev_to_drm(adev))) + r = amdgpu_fence_wait_empty(ring); + else + r = -ENODEV; + /* no need to trigger GPU reset as we are unloading */ + if (r) amdgpu_fence_driver_force_completion(ring); - } - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - drm_sched_fini(&ring->sched); + + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); + del_timer_sync(&ring->fence_drv.fallback_timer); - for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) - dma_fence_put(ring->fence_drv.fences[j]); - kfree(ring->fence_drv.fences); - ring->fence_drv.fences = NULL; - ring->fence_drv.initialized = false; } } -/** - * amdgpu_fence_driver_suspend - suspend the fence driver - * for all possible rings. - * - * @adev: amdgpu device pointer - * - * Suspend the fence driver for all possible rings (all asics). - */ -void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) +void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev) { - int i, r; + unsigned int i, j; for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; + if (!ring || !ring->fence_drv.initialized) continue; - /* wait for gpu to finish processing current batch */ - r = amdgpu_fence_wait_empty(ring); - if (r) { - /* delay GPU reset to resume */ - amdgpu_fence_driver_force_completion(ring); - } + if (!ring->no_scheduler) + drm_sched_fini(&ring->sched); - /* disable the interrupt */ - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) + dma_fence_put(ring->fence_drv.fences[j]); + kfree(ring->fence_drv.fences); + ring->fence_drv.fences = NULL; + ring->fence_drv.initialized = false; } } /** - * amdgpu_fence_driver_resume - resume the fence driver + * amdgpu_fence_driver_hw_init - enable the fence driver * for all possible rings. * * @adev: amdgpu device pointer * - * Resume the fence driver for all possible rings (all asics). + * Enable the fence driver for all possible rings (all asics). * Not all asics have all rings, so each asic will only * start the fence driver on the rings it has using * amdgpu_fence_driver_start_ring(). * Returns 0 for success. 
*/ -void amdgpu_fence_driver_resume(struct amdgpu_device *adev) +void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) { int i; @@ -591,9 +608,15 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; + if (!ring->no_scheduler) { + drm_sched_resubmit_jobs(&ring->sched); + drm_sched_start(&ring->sched, true); + } + /* enable the interrupt */ - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } @@ -620,13 +643,21 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence) static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) { - struct amdgpu_fence *fence = to_amdgpu_fence(f); - return (const char *)fence->ring->name; + struct amdgpu_ring *ring; + + if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + + ring = to_amdgpu_ring(job->base.sched); + } else { + ring = to_amdgpu_fence(f)->ring; + } + return (const char *)ring->name; } /** * amdgpu_fence_enable_signaling - enable signalling on fence - * @fence: fence + * @f: fence * * This function is called with fence_queue lock held, and adds a callback * to fence_queue that checks if this fence is signaled, and if so it @@ -634,14 +665,19 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) */ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) { - struct amdgpu_fence *fence = to_amdgpu_fence(f); - struct amdgpu_ring *ring = fence->ring; + struct amdgpu_ring *ring; + + if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + + ring = to_amdgpu_ring(job->base.sched); + } else { + ring = to_amdgpu_fence(f)->ring; + } if (!timer_pending(&ring->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(ring); - DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); - return true; } @@ -655,14 +691,26 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) static void amdgpu_fence_free(struct rcu_head *rcu) { struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - struct amdgpu_fence *fence = to_amdgpu_fence(f); - kmem_cache_free(amdgpu_fence_slab, fence); + + if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { + /* free job if fence has a parent job */ + struct amdgpu_job *job; + + job = container_of(f, struct amdgpu_job, hw_fence); + kfree(job); + } else { + /* free to fence_slab if it is a separate fence */ + struct amdgpu_fence *fence; + + fence = to_amdgpu_fence(f); + kmem_cache_free(amdgpu_fence_slab, fence); + } } /** * amdgpu_fence_release - callback that fence can be freed - * @fence: fence + * @f: fence * * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence.
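The free path above runs from RCU context: as the release comment says, the fence is handed to the RCU machinery and reclaimed only after a grace period, so a reader that raced with the fence-array update can never dereference freed memory. The generic kernel pattern behind it, for orientation:

    /* Deferred-free pattern: reclaim only after an RCU grace period. */
    struct foo {
        struct rcu_head rcu;
        /* payload ... */
    };

    static void foo_free(struct rcu_head *rcu)
    {
        struct foo *f = container_of(rcu, struct foo, rcu);

        kfree(f);	/* no RCU reader can still see this object */
    }

    /* on the release side, once the refcount drops to zero: */
    /*	call_rcu(&f->rcu, foo_free);	*/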
@@ -679,15 +727,14 @@ static const struct dma_fence_ops amdgpu_fence_ops = { .release = amdgpu_fence_release, }; + /* * Fence debugfs */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) +static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; int i; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -727,41 +774,49 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) return 0; } -/** +/* * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover * * Manually trigger a gpu reset at the next fence wait. */ -static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) +static int gpu_recover_get(void *data, u64 *val) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)data; + struct drm_device *dev = adev_to_drm(adev); + int r; + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); + return 0; + } + + *val = amdgpu_device_gpu_recover(adev, NULL); - seq_printf(m, "gpu recover\n"); - amdgpu_device_gpu_recover(adev, NULL); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); return 0; } -static const struct drm_info_list amdgpu_debugfs_fence_list[] = { - {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, - {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL} -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info); +DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL, + "%lld\n"); -static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = { - {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, -}; #endif -int amdgpu_debugfs_fence_init(struct amdgpu_device *adev) +void amdgpu_debugfs_fence_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - if (amdgpu_sriov_vf(adev)) - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1); - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2); -#else - return 0; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_fence_info", 0444, root, adev, + &amdgpu_debugfs_fence_info_fops); + + if (!amdgpu_sriov_vf(adev)) + debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev, + &amdgpu_debugfs_gpu_recover_fops); #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c new file mode 100644 index 000000000000..7709caeb233d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -0,0 +1,191 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_i2c.h" +#include "smu_v11_0_i2c.h" +#include "atom.h" +#include "amdgpu_fru_eeprom.h" +#include "amdgpu_eeprom.h" + +#define FRU_EEPROM_MADDR 0x60000 +#define I2C_PRODUCT_INFO_OFFSET 0xC0 + +static bool is_fru_eeprom_supported(struct amdgpu_device *adev) +{ + /* Only server cards have the FRU EEPROM + * TODO: See if we can figure this out dynamically instead of + * having to parse VBIOS versions. + */ + struct atom_context *atom_ctx = adev->mode_info.atom_context; + + /* VBIOS is of the format ###-DXXXYY-##. For SKU identification, + * we can use just the "DXXX" portion. If there were more models, we + * could convert the 3 characters to a hex integer and use a switch + * for ease/speed/readability. For now, 2 string comparisons are + * reasonable and not too expensive + */ + switch (adev->asic_type) { + case CHIP_VEGA20: + /* D161 and D163 are the VG20 server SKUs */ + if (strnstr(atom_ctx->vbios_version, "D161", + sizeof(atom_ctx->vbios_version)) || + strnstr(atom_ctx->vbios_version, "D163", + sizeof(atom_ctx->vbios_version))) + return true; + else + return false; + default: + return false; + } +} + +static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, + unsigned char *buff) +{ + int ret, size; + + ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1); + if (ret < 1) { + DRM_WARN("FRU: Failed to get size field"); + return ret; + } + + /* The size returned by the i2c requires subtraction of 0xC0 since the + * size apparently always reports as 0xC0+actual size. + */ + size = buff[0] - I2C_PRODUCT_INFO_OFFSET; + + ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size); + if (ret < 1) { + DRM_WARN("FRU: Failed to get data field"); + return ret; + } + + return size; +} + +int amdgpu_fru_get_product_info(struct amdgpu_device *adev) +{ + unsigned char buff[34]; + u32 addrptr; + int size, len; + + if (!is_fru_eeprom_supported(adev)) + return 0; + + /* If algo exists, it means that the i2c_adapter's initialized */ + if (!adev->pm.smu_i2c.algo) { + DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); + return -ENODEV; + } + + /* There's a lot of repetition here. This is due to the FRU having + * variable-length fields. To get the information, we have to find the + * size of each field, and then keep reading along and reading along + * until we get all of the data that we want. 
We use addrptr to track + * the address as we go + */ + + /* The first fields are all of size 1-byte, from 0-7 are offsets that + * contain information that isn't useful to us. + * Bytes 8-a are all 1-byte and refer to the size of the entire struct, + * and the language field, so just start from 0xb, manufacturer size + */ + addrptr = FRU_EEPROM_MADDR + 0xb; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size); + return -EINVAL; + } + + /* Increment the addrptr by the size of the field, and 1 due to the + * size field being 1 byte. This pattern continues below. + */ + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU product name, ret:%d", size); + return -EINVAL; + } + + len = size; + /* Product name should only be 32 characters. Any more, + * and something could be wrong. Cap it at 32 to be safe + */ + if (len >= sizeof(adev->product_name)) { + DRM_WARN("FRU Product Name is larger than 32 characters. This is likely a mistake"); + len = sizeof(adev->product_name) - 1; + } + /* Start at 2 due to buff using fields 0 and 1 for the address */ + memcpy(adev->product_name, &buff[2], len); + adev->product_name[len] = '\0'; + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + if (size < 1) { + DRM_ERROR("Failed to read FRU product number, ret:%d", size); + return -EINVAL; + } + + len = size; + /* Product number should only be 16 characters. Any more, + * and something could be wrong. Cap it at 16 to be safe + */ + if (len >= sizeof(adev->product_number)) { + DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake"); + len = sizeof(adev->product_number) - 1; + } + memcpy(adev->product_number, &buff[2], len); + adev->product_number[len] = '\0'; + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + + if (size < 1) { + DRM_ERROR("Failed to read FRU product version, ret:%d", size); + return -EINVAL; + } + + addrptr += size + 1; + size = amdgpu_fru_read_eeprom(adev, addrptr, buff); + + if (size < 1) { + DRM_ERROR("Failed to read FRU serial number, ret:%d", size); + return -EINVAL; + } + + len = size; + /* Serial number should only be 16 characters. Any more, + * and something could be wrong. Cap it at 16 to be safe + */ + if (len >= sizeof(adev->serial)) { + DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake"); + len = sizeof(adev->serial) - 1; + } + memcpy(adev->serial, &buff[2], len); + adev->serial[len] = '\0'; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h new file mode 100644 index 000000000000..1308d976d60e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
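The comments above describe the FRU layout in prose; mechanically, every field is one length byte (biased by 0xC0) followed by its payload, and addrptr hops over both. A hypothetical condensation of one step of that walk, with eeprom_read() standing in for amdgpu_eeprom_read() and error handling elided:

    /* Hypothetical sketch of one step of the FRU field walk. */
    static int fru_read_field(uint32_t *addrptr, uint8_t *buf)
    {
        uint8_t len_byte;
        int size;

        eeprom_read(*addrptr, &len_byte, 1);	/* length byte first */
        size = len_byte - 0xC0;			/* length is biased by 0xC0 */

        eeprom_read(*addrptr + 1, buf, size);	/* then the payload */
        *addrptr += size + 1;			/* skip length + payload */
        return size;
    }

Called repeatedly from FRU_EEPROM_MADDR + 0xb, this yields the manufacturer, product name, product number, product version and serial number in exactly the order the function above consumes them.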
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_FRU_EEPROM_H__ +#define __AMDGPU_FRU_EEPROM_H__ + +int amdgpu_fru_get_product_info(struct amdgpu_device *adev); + +#endif // __AMDGPU_FRU_EEPROM_H__ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c new file mode 100644 index 000000000000..2ca3c329de6d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c @@ -0,0 +1,143 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/firmware.h> +#include <linux/dma-mapping.h> + +#include "amdgpu.h" +#include "amdgpu_fw_attestation.h" +#include "amdgpu_psp.h" +#include "amdgpu_ucode.h" +#include "soc15_common.h" + +#define FW_ATTESTATION_DB_COOKIE 0x143b6a37 +#define FW_ATTESTATION_RECORD_VALID 1 +#define FW_ATTESTATION_MAX_SIZE 4096 + +typedef struct FW_ATT_DB_HEADER +{ + uint32_t AttDbVersion; /* version of the fwar feature */ + uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */ +} FW_ATT_DB_HEADER; + +typedef struct FW_ATT_RECORD +{ + uint16_t AttFwIdV1; /* Legacy FW Type field */ + uint16_t AttFwIdV2; /* V2 FW ID field */ + uint32_t AttFWVersion; /* FW Version */ + uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */ + uint8_t AttSource; /* FW source indicator */ + uint8_t RecordValid; /* Indicates whether the record is a valid entry */ + uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */ +} FW_ATT_RECORD; + +static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f, + char __user *buf, + size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + uint64_t records_addr = 0; + uint64_t vram_pos = 0; + FW_ATT_DB_HEADER fw_att_hdr = {0}; + FW_ATT_RECORD fw_att_record = {0}; + + if (size < sizeof(FW_ATT_RECORD)) { + DRM_WARN("FW attestation input buffer is too small"); + return -EINVAL; + } + + if ((*pos + sizeof(FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { + DRM_WARN("FW attestation out of bounds"); + return 0; + } + + if (psp_get_fw_attestation_records_addr(&adev->psp, &records_addr)) { + DRM_WARN("Failed to get FW attestation record address"); + return -EINVAL; + } + + vram_pos = records_addr - adev->gmc.vram_start; + + if (*pos == 0) { + amdgpu_device_vram_access(adev, + vram_pos, + (uint32_t*)&fw_att_hdr, + sizeof(FW_ATT_DB_HEADER), + false); + + if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) { + DRM_WARN("Invalid FW attestation cookie"); + return -EINVAL; + } + + DRM_INFO("FW attestation version = 0x%X", fw_att_hdr.AttDbVersion); + } + + amdgpu_device_vram_access(adev, + vram_pos + sizeof(FW_ATT_DB_HEADER) + *pos, + (uint32_t*)&fw_att_record, + sizeof(FW_ATT_RECORD), + false); + + if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID) + return 0; + + if (copy_to_user(buf, (void*)&fw_att_record, sizeof(FW_ATT_RECORD))) + return -EINVAL; + + *pos += sizeof(FW_ATT_RECORD); + + return sizeof(FW_ATT_RECORD); +} + +static const struct file_operations amdgpu_fw_attestation_debugfs_ops = { + .owner = THIS_MODULE, + .read = amdgpu_fw_attestation_debugfs_read, + .write = NULL, + .llseek = default_llseek +}; + +static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return 0; + + if (adev->asic_type >= CHIP_SIENNA_CICHLID) + return 1; + + return 0; +} + +void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev) +{ + if (!amdgpu_is_fw_attestation_supported(adev)) + return; + + debugfs_create_file("amdgpu_fw_attestation", + S_IRUSR, + adev_to_drm(adev)->primary->debugfs_root, + adev, + &amdgpu_fw_attestation_debugfs_ops); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h new file mode 100644 index 000000000000..90af4fe58c99 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _AMDGPU_FW_ATTESTATION_H +#define _AMDGPU_FW_ATTESTATION_H + +#include "amdgpu.h" + +void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 19705e399905..d3e4203f6217 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -34,6 +34,7 @@ #include <asm/set_memory.h> #endif #include "amdgpu.h" +#include <drm/drm_drv.h> /* * GART @@ -60,7 +61,7 @@ */ /** - * amdgpu_dummy_page_init - init dummy page used by the driver + * amdgpu_gart_dummy_page_init - init dummy page used by the driver * * @adev: amdgpu_device pointer * @@ -71,13 +72,13 @@ */ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) { - struct page *dummy_page = ttm_bo_glob.dummy_read_page; + struct page *dummy_page = ttm_glob.dummy_read_page; if (adev->dummy_page_addr) return 0; - adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) { + adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) { dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); adev->dummy_page_addr = 0; return -ENOMEM; @@ -86,18 +87,18 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) } /** - * amdgpu_dummy_page_fini - free dummy page used by the driver + * amdgpu_gart_dummy_page_fini - free dummy page used by the driver * * @adev: amdgpu_device pointer * * Frees the dummy page used by the driver (all asics). 
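For orientation, the attestation file registered above is record-oriented: each read() with a buffer of at least sizeof(FW_ATT_RECORD) returns one record and advances the file position, and a zero-length read marks the end of the valid entries. A hypothetical userspace consumer, with the debugfs path and field names assumed from the definitions above:

    /* Hypothetical reader for the amdgpu_fw_attestation debugfs file. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct fw_att_record {	/* mirrors FW_ATT_RECORD above, 16 bytes */
        uint16_t fw_id_v1;
        uint16_t fw_id_v2;
        uint32_t fw_version;
        uint16_t active_function_id;
        uint8_t  source;
        uint8_t  valid;
        uint32_t ta_id;
    };

    int main(void)
    {
        struct fw_att_record rec;
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_fw_attestation", O_RDONLY);

        if (fd < 0)
            return 1;
        while (read(fd, &rec, sizeof(rec)) == (ssize_t)sizeof(rec))
            printf("fw id 0x%04x version 0x%08x\n",
                   rec.fw_id_v2, rec.fw_version);
        close(fd);
        return 0;
    }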
*/ -static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) +void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) { if (!adev->dummy_page_addr) return; - pci_unmap_page(adev->pdev, adev->dummy_page_addr, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); adev->dummy_page_addr = 0; } @@ -126,6 +127,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + r = amdgpu_bo_create(adev, &bp, &adev->gart.bo); if (r) { return r; @@ -202,6 +205,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) return; } amdgpu_bo_unref(&adev->gart.bo); + adev->gart.ptr = NULL; } /* @@ -227,18 +231,19 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. */ uint64_t flags = 0; + int idx; if (!adev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - adev->gart.pages[p] = NULL; -#endif page_base = adev->dummy_page_addr; if (!adev->gart.ptr) continue; @@ -250,10 +255,11 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } } mb(); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); + drm_dev_exit(idx); return 0; } @@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, { uint64_t page_base; unsigned i, j, t; + int idx; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; for (i = 0; i < pages; i++) { @@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, page_base += AMDGPU_GPU_PAGE_SIZE; } } + drm_dev_exit(idx); return 0; } @@ -300,47 +311,45 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * @adev: amdgpu_device pointer * @offset: offset into the GPU's gart aperture * @pages: number of pages to bind - * @pagelist: pages to bind * @dma_addr: DMA addresses of pages + * @flags: page table entry flags * * Binds the requested pages to the gart page table * (all asics). * Returns 0 for success, -EINVAL for failure. */ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, dma_addr_t *dma_addr, + int pages, dma_addr_t *dma_addr, uint64_t flags) { -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - unsigned t,p; -#endif - int r, i; - if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - t = offset / AMDGPU_GPU_PAGE_SIZE; - p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - for (i = 0; i < pages; i++, p++) - adev->gart.pages[p] = pagelist ? 
pagelist[i] : NULL; -#endif - if (!adev->gart.ptr) return 0; - r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, - adev->gart.ptr); - if (r) - return r; + return amdgpu_gart_map(adev, offset, pages, dma_addr, flags, + adev->gart.ptr); +} + +/** + * amdgpu_gart_invalidate_tlb - invalidate gart TLB + * + * @adev: amdgpu device driver pointer + * + * Invalidate gart TLB which can be used as a way to flush gart changes + * + */ +void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev) +{ + int i; mb(); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); - return 0; } /** @@ -372,29 +381,5 @@ int amdgpu_gart_init(struct amdgpu_device *adev) DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - /* Allocate pages table */ - adev->gart.pages = vzalloc(array_size(sizeof(void *), - adev->gart.num_cpu_pages)); - if (adev->gart.pages == NULL) - return -ENOMEM; -#endif - return 0; } - -/** - * amdgpu_gart_fini - tear down the driver info for managing the gart - * - * @adev: amdgpu_device pointer - * - * Tear down the gart driver info and free the dummy page (all asics). - */ -void amdgpu_gart_fini(struct amdgpu_device *adev) -{ -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - vfree(adev->gart.pages); - adev->gart.pages = NULL; -#endif - amdgpu_gart_dummy_page_fini(adev); -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index afa2e2877d87..78895413cf9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -46,9 +46,6 @@ struct amdgpu_gart { unsigned num_gpu_pages; unsigned num_cpu_pages; unsigned table_size; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - struct page **pages; -#endif bool ready; /* Asic default pte flags */ @@ -60,14 +57,13 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); int amdgpu_gart_init(struct amdgpu_device *adev); -void amdgpu_gart_fini(struct amdgpu_device *adev); +void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev); int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages); int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags, void *dst); int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, - dma_addr_t *dma_addr, uint64_t flags); - + int pages, dma_addr_t *dma_addr, uint64_t flags); +void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 4277125a79ee..a573424a6e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -29,15 +29,59 @@ #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pci.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> -#include <drm/drm_debugfs.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_ttm_helper.h> #include "amdgpu.h" #include "amdgpu_display.h" +#include "amdgpu_dma_buf.h" #include "amdgpu_xgmi.h" -void amdgpu_gem_object_free(struct drm_gem_object *gobj) +static const struct drm_gem_object_funcs amdgpu_gem_object_funcs; + +static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) +{ + struct
ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct drm_device *ddev = bo->base.dev; + vm_fault_t ret; + int idx; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(ddev, &idx)) { + ret = amdgpu_bo_fault_reserve_notify(bo); + if (ret) { + drm_dev_exit(idx); + goto unlock; + } + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +unlock: + dma_resv_unlock(bo->base.resv); + return ret; +} + +static const struct vm_operations_struct amdgpu_gem_vm_ops = { + .fault = amdgpu_gem_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + +static void amdgpu_gem_object_free(struct drm_gem_object *gobj) { struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); @@ -54,6 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, struct drm_gem_object **obj) { struct amdgpu_bo *bo; + struct amdgpu_bo_user *ubo; struct amdgpu_bo_param bp; int r; @@ -65,34 +110,24 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.type = type; bp.resv = resv; bp.preferred_domain = initial_domain; -retry: bp.flags = flags; bp.domain = initial_domain; - r = amdgpu_bo_create(adev, &bp, &bo); - if (r) { - if (r != -ERESTARTSYS) { - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - goto retry; - } + bp.bo_ptr_size = sizeof(struct amdgpu_bo); - if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { - initial_domain |= AMDGPU_GEM_DOMAIN_GTT; - goto retry; - } - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", - size, initial_domain, alignment, r); - } + r = amdgpu_bo_create_user(adev, &bp, &ubo); + if (r) return r; - } + + bo = &ubo->bo; *obj = &bo->tbo.base; + (*obj)->funcs = &amdgpu_gem_object_funcs; return 0; } void amdgpu_gem_force_release(struct amdgpu_device *adev) { - struct drm_device *ddev = adev->ddev; + struct drm_device *ddev = adev_to_drm(adev); struct drm_file *file; mutex_lock(&ddev->filelist_mutex); @@ -105,7 +140,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, handle) { WARN_ONCE(1, "And also active allocations!\n"); - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); } idr_destroy(&file->object_idr); spin_unlock(&file->table_lock); @@ -118,8 +153,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) * Call from drm_gem_handle_create which appear in both new and open ioctl * case. 
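The fault handler above is also the clearest statement of the hot-unplug discipline running through this series (the GART bind/unbind hunks earlier use it too): anything that may touch the device is bracketed by drm_dev_enter()/drm_dev_exit(), with a software fallback once the device is gone. Reduced to its skeleton:

    /* Skeleton of the hotplug guard used above (drm_drv.h API). */
    int idx;

    if (drm_dev_enter(ddev, &idx)) {
        /* device still present: hardware and mappings are safe */
        ret = do_hw_work();	/* hypothetical placeholder */
        drm_dev_exit(idx);
    } else {
        /* device unplugged: fall back, e.g. serve a dummy page */
        ret = do_fallback();	/* hypothetical placeholder */
    }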
*/ -int amdgpu_gem_object_open(struct drm_gem_object *obj, - struct drm_file *file_priv) +static int amdgpu_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file_priv) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); @@ -134,7 +169,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, return -EPERM; if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && - abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + abo->tbo.base.resv != vm->root.bo->tbo.base.resv) return -EPERM; r = amdgpu_bo_reserve(abo, false); @@ -151,8 +186,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, return 0; } -void amdgpu_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv) +static void amdgpu_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) { struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); @@ -161,16 +196,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_bo_list_entry vm_pd; struct list_head list, duplicates; + struct dma_fence *fence = NULL; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; - int r; + long r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; - tv.num_shared = 1; + tv.num_shared = 2; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); @@ -178,38 +214,76 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); if (r) { dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%d)\n", r); + "we fail to reserve bo (%ld)\n", r); return; } bo_va = amdgpu_vm_bo_find(vm, bo); - if (bo_va && --bo_va->ref_count == 0) { - amdgpu_vm_bo_rmv(adev, bo_va); + if (!bo_va || --bo_va->ref_count) + goto out_unlock; - if (amdgpu_vm_ready(vm)) { - struct dma_fence *fence = NULL; - - r = amdgpu_vm_clear_freed(adev, vm, &fence); - if (unlikely(r)) { - dev_err(adev->dev, "failed to clear page " - "tables on GEM object close (%d)\n", r); - } + amdgpu_vm_bo_rmv(adev, bo_va); + if (!amdgpu_vm_ready(vm)) + goto out_unlock; - if (fence) { - amdgpu_bo_fence(bo, fence, true); - dma_fence_put(fence); - } - } + fence = dma_resv_excl_fence(bo->tbo.base.resv); + if (fence) { + amdgpu_bo_fence(bo, fence, true); + fence = NULL; } + + r = amdgpu_vm_clear_freed(adev, vm, &fence); + if (r || !fence) + goto out_unlock; + + amdgpu_bo_fence(bo, fence, true); + dma_fence_put(fence); + +out_unlock: + if (unlikely(r < 0)) + dev_err(adev->dev, "failed to clear page " + "tables on GEM object close (%ld)\n", r); ttm_eu_backoff_reservation(&ticket, &list); } +static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) + return -EPERM; + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + return -EPERM; + + /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings + * for debugger access to invisible VRAM. Should have used MAP_SHARED + * instead. Clearing VM_MAYWRITE prevents the mapping from ever + * becoming writable and makes is_cow_mapping(vm_flags) false. 
+ */ + if (is_cow_mapping(vma->vm_flags) && + !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + vma->vm_flags &= ~VM_MAYWRITE; + + return drm_gem_ttm_mmap(obj, vma); +} + +static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { + .free = amdgpu_gem_object_free, + .open = amdgpu_gem_object_open, + .close = amdgpu_gem_object_close, + .export = amdgpu_gem_prime_export, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, + .mmap = amdgpu_gem_object_mmap, + .vm_ops = &amdgpu_gem_vm_ops, +}; + /* * GEM ioctls. */ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; union drm_amdgpu_gem_create *args = data; @@ -217,7 +291,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, uint64_t size = args->in.bo_size; struct dma_resv *resv = NULL; struct drm_gem_object *gobj; - uint32_t handle; + uint32_t handle, initial_domain; int r; /* reject invalid gem flags */ @@ -226,7 +300,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_VRAM_CLEARED | AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | - AMDGPU_GEM_CREATE_EXPLICIT_SYNC)) + AMDGPU_GEM_CREATE_EXPLICIT_SYNC | + AMDGPU_GEM_CREATE_ENCRYPTED)) return -EINVAL; @@ -234,6 +309,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) return -EINVAL; + if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) { + DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n"); + return -EINVAL; + } + /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { @@ -248,30 +328,46 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, } if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { - r = amdgpu_bo_reserve(vm->root.base.bo, false); + r = amdgpu_bo_reserve(vm->root.bo, false); if (r) return r; - resv = vm->root.base.bo->tbo.base.resv; + resv = vm->root.bo->tbo.base.resv; } + initial_domain = (u32)(0xffffffff & args->in.domains); +retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, - (u32)(0xffffffff & args->in.domains), + initial_domain, flags, ttm_bo_type_device, resv, &gobj); + if (r && r != -ERESTARTSYS) { + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } + + if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { + initial_domain |= AMDGPU_GEM_DOMAIN_GTT; + goto retry; + } + DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n", + size, initial_domain, args->in.alignment, r); + } + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { if (!r) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - abo->parent = amdgpu_bo_ref(vm->root.base.bo); + abo->parent = amdgpu_bo_ref(vm->root.bo); } - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); } if (r) return r; r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); if (r) return r; @@ -284,7 +380,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct ttm_operation_ctx ctx = { true, false }; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device 
*adev = drm_to_adev(dev); struct drm_amdgpu_gem_userptr *args = data; struct drm_gem_object *gobj; struct amdgpu_bo *bo; @@ -318,7 +414,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, bo = gem_to_amdgpu_bo(gobj); bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; - r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags); if (r) goto release_object; @@ -355,7 +451,7 @@ user_pages_done: amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); release_object: - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return r; } @@ -374,11 +470,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return -EPERM; } *offset_p = amdgpu_bo_mmap_offset(robj); - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return 0; } @@ -435,8 +531,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, - timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled @@ -448,7 +543,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } else r = ret; - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return r; } @@ -491,7 +586,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, unreserve: amdgpu_bo_unreserve(robj); out: - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return r; } @@ -522,7 +617,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) goto error; } @@ -573,7 +668,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_amdgpu_gem_va *args = data; struct drm_gem_object *gobj; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *abo; struct amdgpu_bo_va *bo_va; @@ -582,10 +677,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct ww_acquire_ctx ticket; struct list_head list, duplicates; uint64_t va_flags; + uint64_t vm_size; int r = 0; if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { - dev_dbg(&dev->pdev->dev, + dev_dbg(dev->dev, "va_address 0x%LX is in reserved area 0x%LX\n", args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; @@ -593,7 +689,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address >= AMDGPU_GMC_HOLE_START && args->va_address < AMDGPU_GMC_HOLE_END) { - dev_dbg(&dev->pdev->dev, + dev_dbg(dev->dev, "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n", args->va_address, AMDGPU_GMC_HOLE_START, AMDGPU_GMC_HOLE_END); @@ -602,8 +698,17 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->va_address &= AMDGPU_GMC_HOLE_MASK; + vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; + vm_size -= AMDGPU_VA_RESERVED_SIZE; + if (args->va_address + args->map_size > vm_size) { + dev_dbg(dev->dev, + "va_address 0x%llx is in top reserved area 0x%llx\n", + args->va_address + args->map_size, 
vm_size); + return -EINVAL; + } + if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { - dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", + dev_dbg(dev->dev, "invalid flags combination 0x%08X\n", args->flags); return -EINVAL; } @@ -615,7 +720,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, case AMDGPU_VA_OP_REPLACE: break; default: - dev_dbg(&dev->pdev->dev, "unsupported operation %d\n", + dev_dbg(dev->dev, "unsupported operation %d\n", args->operation); return -EINVAL; } @@ -690,14 +795,14 @@ error_backoff: ttm_eu_backoff_reservation(&ticket, &list); error_unref: - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return r; } int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_op *args = data; struct drm_gem_object *gobj; struct amdgpu_vm_bo_base *base; @@ -720,7 +825,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, void __user *out = u64_to_user_ptr(args->value); info.bo_size = robj->tbo.base.size; - info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; + info.alignment = robj->tbo.page_alignment << PAGE_SHIFT; info.domains = robj->preferred_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); @@ -729,7 +834,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) { + if (robj->tbo.base.import_attach && + args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; amdgpu_bo_unreserve(robj); break; @@ -741,7 +847,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, } for (base = robj->vm_bo; base; base = base->next) if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), - amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { + amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { r = -EINVAL; amdgpu_bo_unreserve(robj); goto out; @@ -766,7 +872,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, } out: - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); return r; } @@ -774,7 +880,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | @@ -794,7 +900,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); - domain = amdgpu_bo_get_preferred_pin_domain(adev, + domain = amdgpu_bo_get_preferred_domain(adev, amdgpu_display_supported_domains(adev, flags)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, ttm_bo_type_device, NULL, &gobj); @@ -803,7 +909,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put(gobj); if (r) { return r; } @@ -812,70 +918,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, } #if defined(CONFIG_DEBUG_FS) - -#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \ - if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \ - seq_printf((m), " " #flag); \ - } - -static int amdgpu_debugfs_gem_bo_info(int id, 
void *ptr, void *data) +static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused) { - struct drm_gem_object *gobj = ptr; - struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); - struct seq_file *m = data; - - struct dma_buf_attachment *attachment; - struct dma_buf *dma_buf; - unsigned domain; - const char *placement; - unsigned pin_count; - - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - switch (domain) { - case AMDGPU_GEM_DOMAIN_VRAM: - placement = "VRAM"; - break; - case AMDGPU_GEM_DOMAIN_GTT: - placement = " GTT"; - break; - case AMDGPU_GEM_DOMAIN_CPU: - default: - placement = " CPU"; - break; - } - seq_printf(m, "\t0x%08x: %12ld byte %s", - id, amdgpu_bo_size(bo), placement); - - pin_count = READ_ONCE(bo->pin_count); - if (pin_count) - seq_printf(m, " pin count %d", pin_count); - - dma_buf = READ_ONCE(bo->tbo.base.dma_buf); - attachment = READ_ONCE(bo->tbo.base.import_attach); - - if (attachment) - seq_printf(m, " imported from %p", dma_buf); - else if (dma_buf) - seq_printf(m, " exported as %p", dma_buf); - - amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); - amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS); - amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC); - amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED); - amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW); - amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS); - amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID); - amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC); - - seq_printf(m, "\n"); - - return 0; -} - -static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct drm_device *dev = adev_to_drm(adev); struct drm_file *file; int r; @@ -885,6 +931,8 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) list_for_each_entry(file, &dev->filelist, lhead) { struct task_struct *task; + struct drm_gem_object *gobj; + int id; /* * Although we have a valid reference on file->pid, that does @@ -899,7 +947,11 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) rcu_read_unlock(); spin_lock(&file->table_lock); - idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m); + idr_for_each_entry(&file->object_idr, gobj, id) { + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + + amdgpu_bo_print_info(id, bo, m); + } spin_unlock(&file->table_lock); } @@ -907,15 +959,17 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) return 0; } -static const struct drm_info_list amdgpu_debugfs_gem_list[] = { - {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info); + #endif -int amdgpu_debugfs_gem_init(struct amdgpu_device *adev) +void amdgpu_debugfs_gem_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1); + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_gem_info", 0444, root, adev, + &amdgpu_debugfs_gem_info_fops); #endif - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index e0f025dd1b14..637bf51dbf06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -33,11 +33,6 @@ #define AMDGPU_GEM_DOMAIN_MAX 0x3 #define 
gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base) -void amdgpu_gem_object_free(struct drm_gem_object *obj); -int amdgpu_gem_object_open(struct drm_gem_object *obj, - struct drm_file *file_priv); -void amdgpu_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv); unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e00b46180d2e..1916ec84dd71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -31,6 +31,8 @@ /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) +#define GFX_OFF_NO_DELAY 0 + /* * GPU GFX IP block helper functions. */ @@ -48,7 +50,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, return bit; } -void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit, +void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue) { *queue = bit % adev->gfx.mec.num_queue_per_pipe; @@ -192,42 +194,44 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) return adev->gfx.mec.num_mec > 1; } -void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { - int i, queue, pipe, mec; - bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev); + /* Policy: use 1st queue as high priority compute queue if we + * have more than one compute queue. + */ + if (adev->gfx.num_compute_rings > 1 && + ring == &adev->gfx.compute_ring[0]) + return true; - /* policy for amdgpu compute queue ownership */ - for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - queue = i % adev->gfx.mec.num_queue_per_pipe; - pipe = (i / adev->gfx.mec.num_queue_per_pipe) - % adev->gfx.mec.num_pipe_per_mec; - mec = (i / adev->gfx.mec.num_queue_per_pipe) - / adev->gfx.mec.num_pipe_per_mec; - - /* we've run out of HW */ - if (mec >= adev->gfx.mec.num_mec) - break; + return false; +} - if (multipipe_policy) { - /* policy: amdgpu owns the first two queues of the first MEC */ - if (mec == 0 && queue < 2) - set_bit(i, adev->gfx.mec.queue_bitmap); - } else { - /* policy: amdgpu owns all queues in the first pipe */ - if (mec == 0 && pipe == 0) - set_bit(i, adev->gfx.mec.queue_bitmap); +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) +{ + int i, queue, pipe; + bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev); + int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe, + adev->gfx.num_compute_rings); + + if (multipipe_policy) { + /* policy: make queues evenly cross all pipes on MEC1 only */ + for (i = 0; i < max_queues_per_mec; i++) { + pipe = i % adev->gfx.mec.num_pipe_per_mec; + queue = (i / adev->gfx.mec.num_pipe_per_mec) % + adev->gfx.mec.num_queue_per_pipe; + + set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, + adev->gfx.mec.queue_bitmap); } + } else { + /* policy: amdgpu owns all queues in the given pipe */ + for (i = 0; i < max_queues_per_mec; ++i) + set_bit(i, adev->gfx.mec.queue_bitmap); } - /* update the number of active compute rings */ - adev->gfx.num_compute_rings = - bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); - - /* If you hit this case and edited the policy, you probably just - * need to increase AMDGPU_MAX_COMPUTE_RINGS */ - if (WARN_ON(adev->gfx.num_compute_rings >
AMDGPU_MAX_COMPUTE_RINGS)) - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); } void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) @@ -266,7 +270,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; - amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); + amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); /* * 1. Using pipes 2/3 from MEC 2 seems to cause problems. @@ -296,10 +300,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); - if (r) - return r; - ring->adev = NULL; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -310,9 +310,10 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, return r; ring->eop_gpu_addr = kiq->eop_gpu_addr; + ring->no_scheduler = true; sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue); - r = amdgpu_ring_init(adev, ring, 1024, - irq, AMDGPU_CP_KIQ_IRQ_DRIVER0); + r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); @@ -321,7 +322,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); amdgpu_ring_fini(ring); } @@ -464,20 +464,38 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) { struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *kiq_ring = &kiq->ring; - int i; + int i, r; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + spin_lock(&adev->gfx.kiq.ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_compute_rings)) + adev->gfx.num_compute_rings)) { + spin_unlock(&adev->gfx.kiq.ring_lock); return -ENOMEM; + } for (i = 0; i < adev->gfx.num_compute_rings; i++) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); + r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + + return r; +} + +int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, + int queue_bit) +{ + int mec, pipe, queue; + int set_resource_bit = 0; + + amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); - return amdgpu_ring_test_ring(kiq_ring); + set_resource_bit = mec * 4 * 8 + pipe * 8 + queue; + + return set_resource_bit; } int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) @@ -502,17 +520,18 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) break; } - queue_mask |= (1ull << i); + queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i)); } DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); - + spin_lock(&adev->gfx.kiq.ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&adev->gfx.kiq.ring_lock); return r; } @@ -521,6 +540,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&adev->gfx.kiq.ring_lock); if (r) DRM_ERROR("KCQ enable failed\n"); @@ -540,30 +560,66 @@ int
amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) { - if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) - return; + unsigned long delay = GFX_OFF_DELAY_ENABLE; - if (!is_support_sw_smu(adev) && - (!adev->powerplay.pp_funcs || - !adev->powerplay.pp_funcs->set_powergating_by_smu)) + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return; - mutex_lock(&adev->gfx.gfx_off_mutex); - if (!enable) - adev->gfx.gfx_off_req_count++; - else if (adev->gfx.gfx_off_req_count > 0) + if (enable) { + /* If the count is already 0, it means there's an imbalance bug somewhere. + * Note that the bug may be in a different caller than the one which triggers the + * WARN_ON_ONCE. + */ + if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0)) + goto unlock; + adev->gfx.gfx_off_req_count--; - if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); - } else if (!enable && adev->gfx.gfx_off_state) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) - adev->gfx.gfx_off_state = false; + if (adev->gfx.gfx_off_req_count == 0 && + !adev->gfx.gfx_off_state) { + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) + delay = GFX_OFF_NO_DELAY; + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + delay); + } + } else { + if (adev->gfx.gfx_off_req_count == 0) { + cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); + + if (adev->gfx.gfx_off_state && + !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) { + adev->gfx.gfx_off_state = false; + + if (adev->gfx.funcs->init_spm_golden) { + dev_dbg(adev->dev, + "GFXOFF is disabled, re-init SPM golden settings\n"); + amdgpu_gfx_init_spm_golden(adev); + } + } + } + + adev->gfx.gfx_off_req_count++; } +unlock: + mutex_unlock(&adev->gfx.gfx_off_mutex); +} + +int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) +{ + + int r = 0; + + mutex_lock(&adev->gfx.gfx_off_mutex); + + r = smu_get_status_gfxoff(adev, value); + mutex_unlock(&adev->gfx.gfx_off_mutex); + + return r; } int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) @@ -571,7 +627,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_gfx_process_ras_data_cb, @@ -584,16 +639,17 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->gfx.ras_if->sub_block_index = 0; - strcpy(adev->gfx.ras_if->name, "gfx"); } fs_info.head = ih_info.head = *adev->gfx.ras_if; - r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, &fs_info, &ih_info); if (r) goto free; if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { + if (!amdgpu_persistent_edc_harvesting_supported(adev)) + amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) goto late_fini; @@ -639,9 +695,10 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_count) + 
adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); + amdgpu_ras_reset_gpu(adev); } return AMDGPU_RAS_SUCCESS; } @@ -664,3 +721,150 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } + +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq, reg_val_offs = 0, value = 0; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + if (amdgpu_device_wb_get(adev, &reg_val_offs)) { + pr_err("critical bug! too many kiq readers\n"); + goto failed_unlock; + } + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_rreg(ring, reg, reg_val_offs); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which causes + * gpu_recover() to hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + mb(); + value = adev->wb.wb[reg_val_offs]; + amdgpu_device_wb_free(adev, reg_val_offs); + return value; + +failed_undo: + amdgpu_ring_undo(ring); +failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_read: + if (reg_val_offs) + amdgpu_device_wb_free(adev, reg_val_offs); + dev_err(adev->dev, "failed to read reg:%x\n", reg); + return ~0; +} + +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_wreg); + + if (amdgpu_device_skip_hw_access(adev)) + return; + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_wreg(ring, reg, v); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which causes + * gpu_recover() to hang there.
+ * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) + goto failed_kiq_write; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_write; + + return; + +failed_undo: + amdgpu_ring_undo(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_write: + dev_err(adev->dev, "failed to write reg:%x\n", reg); +} + +int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) +{ + if (amdgpu_num_kcq == -1) { + return 8; + } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) { + dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n"); + return 8; + } + return amdgpu_num_kcq; +} + +/* amdgpu_gfx_state_change_set - Handle gfx power state change set + * @adev: amdgpu_device pointer + * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) + * + */ + +void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state) +{ + mutex_lock(&adev->pm.mutex); + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->gfx_state_change_set) + ((adev)->powerplay.pp_funcs->gfx_state_change_set( + (adev)->powerplay.pp_handle, state)); + mutex_unlock(&adev->pm.mutex); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0ae0a2715b0d..f851196c83a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -30,6 +30,7 @@ #include "clearstate_defs.h" #include "amdgpu_ring.h" #include "amdgpu_rlc.h" +#include "soc15.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L @@ -41,6 +42,20 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES +enum amdgpu_gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1, + AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 +}; + +/* Argument for PPSMC_MSG_GpuChangeState */ +enum gfx_change_state { + sGpuChangeState_D0Entry = 1, + sGpuChangeState_D3Entry, +}; + +#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 +#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -76,11 +91,15 @@ struct kiq_pm4_funcs { struct amdgpu_ring *ring, u64 addr, u64 seq); + void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub); /* Packet sizes */ int set_resources_size; int map_queues_size; int unmap_queues_size; int query_status_size; + int invalidate_tlbs_size; }; struct amdgpu_kiq { @@ -121,6 +140,7 @@ struct gb_addr_config { uint8_t num_banks; uint8_t num_se; uint8_t num_rb_per_se; + uint8_t num_pkrs; }; struct amdgpu_gfx_config { @@ -146,6 +166,8 @@ struct amdgpu_gfx_config { unsigned num_gpus; unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; + unsigned num_banks; + unsigned num_ranks; unsigned gb_addr_config; unsigned num_rbs; unsigned gs_vgt_table_depth; @@ -182,6 +204,19 @@ struct amdgpu_cu_info { uint32_t bitmap[4][4]; }; +struct amdgpu_gfx_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); + int (*ras_error_inject)(struct amdgpu_device *adev, + void *inject_if); + int (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); + void 
(*query_ras_error_status)(struct amdgpu_device *adev); + void (*reset_ras_error_status)(struct amdgpu_device *adev); + void (*enable_watchdog_timer)(struct amdgpu_device *adev); +}; + struct amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); @@ -197,8 +232,8 @@ struct amdgpu_gfx_funcs { uint32_t *dst); void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); - int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); - int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); + void (*init_spm_golden)(struct amdgpu_device *adev); + void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); }; struct sq_work { @@ -301,12 +336,14 @@ struct amdgpu_gfx { DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /*ras */ - struct ras_common_if *ras_if; + struct ras_common_if *ras_if; + const struct amdgpu_gfx_ras_funcs *ras_funcs; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) +#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) /** * amdgpu_gfx_create_bitmask - create a bitmask @@ -348,10 +385,12 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); -void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit, +void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, int pipe, int queue); +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, @@ -359,6 +398,7 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); +int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, @@ -367,4 +407,8 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); +int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); +void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h index ef31448ee8d8..beabab515836 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. 
+ * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,26 +20,25 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#ifndef __AMDGPU_GFXHUB_H__ +#define __AMDGPU_GFXHUB_H__ -#ifndef __AMDGPU_PM_H__ -#define __AMDGPU_PM_H__ +struct amdgpu_gfxhub_funcs { + u64 (*get_fb_location)(struct amdgpu_device *adev); + u64 (*get_mc_fb_offset)(struct amdgpu_device *adev); + void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); + int (*gart_enable)(struct amdgpu_device *adev); -struct cg_flag_name -{ - u32 flag; - const char *name; + void (*gart_disable)(struct amdgpu_device *adev); + void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value); + void (*init)(struct amdgpu_device *adev); + int (*get_xgmi_info)(struct amdgpu_device *adev); + void (*utcl2_harvest)(struct amdgpu_device *adev); }; -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); -int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); -int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev); -void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); -void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev); -void amdgpu_pm_print_power_states(struct amdgpu_device *adev); -int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); -void amdgpu_dpm_thermal_work_handler(struct work_struct *work); -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); +struct amdgpu_gfxhub { + const struct amdgpu_gfxhub_funcs *funcs; +}; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index a12f33c0f5df..08478fce00f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -27,9 +27,65 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include "amdgpu.h" +#include "amdgpu_gmc.h" #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" +#include <drm/drm_drv.h> + +/** + * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0 + * + * @adev: amdgpu_device pointer + * + * Allocate video memory for pdb0 and map it for CPU access + * Returns 0 for success, error for failure. 
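Before the allocation body that follows, it may help to see the size arithmetic in isolation. A stand-alone sketch of the PDB0 sizing, with the XGMI node and block-size values assumed purely for illustration; only the formulas are taken from the function:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    /* assumed hive: 32 GiB per node, 2 nodes, block size 12 */
    uint64_t node_segment_size = 32ULL << 30;
    uint64_t num_physical_nodes = 2;
    uint32_t vmid0_page_table_block_size = 12;

    uint64_t vram_size = node_segment_size * num_physical_nodes;
    /* each PDE0 entry used as a PTE covers 2^block_size * 2 MiB */
    uint32_t pde0_page_shift = vmid0_page_table_block_size + 21;
    uint64_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1)
                     >> pde0_page_shift;
    /* one extra 8-byte entry points at the system-memory PTB */
    uint64_t bo_size = PAGE_ALIGN((npdes + 1) * 8);

    printf("PDE0 page %llu MiB, %llu entries, BO %llu bytes\n",
           (unsigned long long)((1ULL << pde0_page_shift) >> 20),
           (unsigned long long)npdes,
           (unsigned long long)bo_size);
    return 0;
}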
+ */ +int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev) +{ + int r; + struct amdgpu_bo_param bp; + u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; + uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21; + uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift; + + memset(&bp, 0, sizeof(bp)); + bp.size = PAGE_ALIGN((npdes + 1) * 8); + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo); + if (r) + return r; + + r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false); + if (unlikely(r != 0)) + goto bo_reserve_failure; + + r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM); + if (r) + goto bo_pin_failure; + r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0); + if (r) + goto bo_kmap_failure; + + amdgpu_bo_unreserve(adev->gmc.pdb0_bo); + return 0; + +bo_kmap_failure: + amdgpu_bo_unpin(adev->gmc.pdb0_bo); +bo_pin_failure: + amdgpu_bo_unreserve(adev->gmc.pdb0_bo); +bo_reserve_failure: + amdgpu_bo_unref(&adev->gmc.pdb0_bo); + return r; +} + /** * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO * @@ -44,12 +100,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_dma_tt *ttm; - switch (bo->tbo.mem.mem_type) { + switch (bo->tbo.resource->mem_type) { case TTM_PL_TT: - ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); - *addr = ttm->dma_address[0]; + *addr = bo->tbo.ttm->dma_address[0]; break; case TTM_PL_VRAM: *addr = amdgpu_bo_gpu_offset(bo); @@ -58,13 +112,12 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, *addr = 0; break; } - *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); + *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource); amdgpu_gmc_get_vm_pde(adev, level, addr, flags); } -/** +/* * amdgpu_gmc_pd_addr - return the address of the root directory - * */ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo) { @@ -107,13 +160,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, value = addr & 0x0000FFFFFFFFF000ULL; value |= flags; writeq(value, ptr + (gpu_page_idx * 8)); + return 0; } /** * amdgpu_gmc_agp_addr - return the address in the AGP address space * - * @tbo: TTM BO which needs the address, must be in GTT domain + * @bo: TTM BO which needs the address, must be in GTT domain * * Tries to figure out how to access the BO through the AGP aperture. Returns * AMDGPU_BO_INVALID_OFFSET if that is not possible. 
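The AGP helper documented above reduces to two guards plus an offset add. A hedged user-space rendering of that decision, with the TTM structures flattened to plain values and the invalid-offset sentinel chosen just for this sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE      4096ULL
#define INVALID_OFFSET (~0ULL)  /* sentinel picked for this sketch */

/* Model of the decision in amdgpu_gmc_agp_addr(): a BO is reachable
 * through the AGP aperture only if it is a single uncached page whose
 * DMA address lies below the aperture size. */
static uint64_t agp_addr(uint64_t num_pages, bool cached,
                         uint64_t dma_address0,
                         uint64_t agp_start, uint64_t agp_size)
{
    if (num_pages != 1 || cached)
        return INVALID_OFFSET;
    if (dma_address0 + PAGE_SIZE >= agp_size)
        return INVALID_OFFSET;
    return agp_start + dma_address0;
}

int main(void)
{
    /* one uncached page inside an assumed 256 MiB aperture */
    assert(agp_addr(1, false, 0x1000, 0xff00000000ULL, 256ULL << 20) ==
           0xff00000000ULL + 0x1000);
    /* multi-page or cached BOs cannot use the aperture */
    assert(agp_addr(2, false, 0x1000, 0xff00000000ULL, 256ULL << 20) ==
           INVALID_OFFSET);
    return 0;
}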
@@ -121,23 +175,21 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); - struct ttm_dma_tt *ttm; - if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached) + if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached) return AMDGPU_BO_INVALID_OFFSET; - ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm); - if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) + if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) return AMDGPU_BO_INVALID_OFFSET; - return adev->gmc.agp_start + ttm->dma_address[0]; + return adev->gmc.agp_start + bo->ttm->dma_address[0]; } /** * amdgpu_gmc_vram_location - try to find VRAM location * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * @base: base address at which to put VRAM * * Function will try to place VRAM at base address provided @@ -162,14 +214,46 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, mc->vram_end, mc->real_vram_size >> 20); } +/** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture + * + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information + * + * This function is only used when GART is used for FB translation. In + * such a case, we use the sysvm aperture (vmid0 page tables) for both + * vram and gart (aka system memory) access. + * + * GPUVM (and our organization of vmid0 page tables) requires the sysvm + * aperture to be placed at a location aligned to 8 times the native + * page size. For example, if vm_context0_cntl.page_table_block_size + * is 12, then native page size is 8G (2M*2^12), so sysvm should start + * with a 64G aligned address. For simplicity, we just put sysvm at + * address 0. So vram starts at address 0 and gart is right after vram. + */ +void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) +{ + u64 hive_vram_start = 0; + u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1; + mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id; + mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1; + mc->gart_start = hive_vram_end + 1; + mc->gart_end = mc->gart_start + mc->gart_size - 1; + mc->fb_start = hive_vram_start; + mc->fb_end = hive_vram_end; + dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); + dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", + mc->gart_size >> 20, mc->gart_start, mc->gart_end); +} + /** * amdgpu_gmc_gart_location - try to find GART location * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * * Function will try to place GART before or after VRAM. - * * If GART size is bigger than space left then we adjust GART size. * Thus the function will never fail.
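A compact model of the layout amdgpu_gmc_sysvm_location() produces, hive VRAM starting at 0 with GART immediately after, under assumed hive parameters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* assumed hive: 2 nodes of 32 GiB, this is node 1, 512 MiB GART */
    uint64_t node_segment_size = 32ULL << 30;
    uint64_t num_physical_nodes = 2;
    uint64_t physical_node_id = 1;
    uint64_t gart_size = 512ULL << 20;

    uint64_t fb_start = 0;  /* sysvm starts at address 0 */
    uint64_t fb_end = node_segment_size * num_physical_nodes - 1;
    uint64_t vram_start = node_segment_size * physical_node_id;
    uint64_t vram_end = vram_start + node_segment_size - 1;
    uint64_t gart_start = fb_end + 1;  /* gart right after hive vram */
    uint64_t gart_end = gart_start + gart_size - 1;

    printf("hive FB   0x%016llx-0x%016llx\n",
           (unsigned long long)fb_start, (unsigned long long)fb_end);
    printf("node VRAM 0x%016llx-0x%016llx\n",
           (unsigned long long)vram_start, (unsigned long long)vram_end);
    printf("GART      0x%016llx-0x%016llx\n",
           (unsigned long long)gart_start, (unsigned long long)gart_end);
    return 0;
}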
*/ @@ -180,8 +264,6 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); - mc->gart_size += adev->pm.smu_prv_buffer_size; - /* VCE doesn't like it when BOs cross a 4GB segment, so align * the GART base on a 4GB boundary as well. */ @@ -207,8 +289,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) /** * amdgpu_gmc_agp_location - try to find AGP location - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations + * @adev: amdgpu device structure holding all necessary information + * @mc: memory controller structure holding memory information * * Function will try to find a place for the AGP BAR in the MC address * space. @@ -223,7 +305,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) u64 size_af, size_bf; if (amdgpu_sriov_vf(adev)) { - mc->agp_start = 0xffffffff; + mc->agp_start = 0xffffffffffff; mc->agp_end = 0x0; mc->agp_size = 0; @@ -254,6 +336,17 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) } /** + * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid + * + * @addr: 48 bit physical address, page aligned (36 significant bits) + * @pasid: 16 bit process address space identifier + */ +static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) +{ + return addr << 4 | pasid; +} + +/** * amdgpu_gmc_filter_faults - filter VM faults * * @adev: amdgpu device structure @@ -269,8 +362,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp) { struct amdgpu_gmc *gmc = &adev->gmc; - - uint64_t stamp, key = addr << 4 | pasid; + uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid); struct amdgpu_gmc_fault *fault; uint32_t hash; @@ -286,7 +378,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, while (fault->timestamp >= stamp) { uint64_t tmp; - if (fault->key == key) + if (atomic64_read(&fault->key) == key) return true; tmp = fault->timestamp; @@ -299,7 +391,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, /* Add the fault to the ring */ fault = &gmc->fault_ring[gmc->last_fault]; - fault->key = key; + atomic64_set(&fault->key, key); fault->timestamp = timestamp; /* And update the hash */ @@ -308,28 +400,428 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, return false; } +/** + * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter + * + * @adev: amdgpu device structure + * @addr: address of the VM fault + * @pasid: PASID of the process causing the fault + * + * Remove the address from the fault filter, so that future vm faults on this + * address will be passed to the retry fault handler to recover.
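The fault filter above packs address and PASID into one 64-bit key and hashes it. A stand-alone sketch of the packing plus a golden-ratio hash in the style of the kernel's hash_64(); the multiplier is the usual 2^64 divided by the golden ratio and should be treated as illustrative here:

#include <stdint.h>
#include <stdio.h>

#define FAULT_HASH_ORDER 8
/* 2^64 / golden ratio, as used by the kernel's hash_64() */
#define GOLDEN_RATIO_64  0x61C8864680B583EBull

/* Pack the page-aligned fault address and the 16-bit PASID into one key.
 * A page-aligned address has 12 zero low bits; shifting left by 4 clears
 * 16, leaving exact room for the PASID. */
static uint64_t fault_key(uint64_t addr, uint16_t pasid)
{
    return addr << 4 | pasid;
}

static uint32_t hash_64(uint64_t val, unsigned int bits)
{
    return (uint32_t)((val * GOLDEN_RATIO_64) >> (64 - bits));
}

int main(void)
{
    uint64_t key = fault_key(0x12345000ULL, 0x42);

    printf("key 0x%llx -> bucket %u\n", (unsigned long long)key,
           hash_64(key, FAULT_HASH_ORDER));
    return 0;
}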
+ */ +void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, + uint16_t pasid) +{ + struct amdgpu_gmc *gmc = &adev->gmc; + uint64_t key = amdgpu_gmc_fault_key(addr, pasid); + struct amdgpu_gmc_fault *fault; + uint32_t hash; + uint64_t tmp; + + hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER); + fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; + do { + if (atomic64_cmpxchg(&fault->key, key, 0) == key) + break; + + tmp = fault->timestamp; + fault = &gmc->fault_ring[fault->next]; + } while (fault->timestamp < tmp); +} + int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) { int r; - if (adev->umc.funcs && adev->umc.funcs->ras_late_init) { - r = adev->umc.funcs->ras_late_init(adev); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ras_late_init) { + r = adev->umc.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->ras_late_init) { + r = adev->mmhub.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + if (!adev->gmc.xgmi.connected_to_cpu) + adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs; + + if (adev->gmc.xgmi.ras_funcs && + adev->gmc.xgmi.ras_funcs->ras_late_init) { + r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->ras_late_init) { + r = adev->hdp.ras_funcs->ras_late_init(adev); if (r) return r; } - if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) { - r = adev->mmhub.funcs->ras_late_init(adev); + if (adev->mca.mp0.ras_funcs && + adev->mca.mp0.ras_funcs->ras_late_init) { + r = adev->mca.mp0.ras_funcs->ras_late_init(adev); if (r) return r; } - return amdgpu_xgmi_ras_late_init(adev); + if (adev->mca.mp1.ras_funcs && + adev->mca.mp1.ras_funcs->ras_late_init) { + r = adev->mca.mp1.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->mca.mpio.ras_funcs && + adev->mca.mpio.ras_funcs->ras_late_init) { + r = adev->mca.mpio.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + return 0; } void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) { - amdgpu_umc_ras_fini(adev); - amdgpu_mmhub_ras_fini(adev); - amdgpu_xgmi_ras_fini(adev); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ras_fini) + adev->umc.ras_funcs->ras_fini(adev); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->ras_fini) + adev->mmhub.ras_funcs->ras_fini(adev); + + if (adev->gmc.xgmi.ras_funcs && + adev->gmc.xgmi.ras_funcs->ras_fini) + adev->gmc.xgmi.ras_funcs->ras_fini(adev); + + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->ras_fini) + adev->hdp.ras_funcs->ras_fini(adev); +} + + /* + * The latest engine allocation on gfx9/10 is: + * Engine 2, 3: firmware + * Engine 0, 1, 4~16: amdgpu ring, + * subject to change when ring number changes + * Engine 17: Gart flushes + */ +#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 +#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 + +int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, + GFXHUB_FREE_VM_INV_ENGS_BITMAP}; + unsigned i; + unsigned vmhub, inv_eng; + + for (i = 0; i < adev->num_rings; ++i) { + ring = adev->rings[i]; + vmhub = ring->funcs->vmhub; + + if (ring == &adev->mes.ring) + continue; + + inv_eng = ffs(vm_inv_engs[vmhub]); + if (!inv_eng) { + dev_err(adev->dev, "no VM inv eng for ring %s\n", + ring->name); + return -EINVAL; + } + + ring->vm_inv_eng = inv_eng - 1; + vm_inv_engs[vmhub] &= ~(1 << 
ring->vm_inv_eng); + + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", + ring->name, ring->vm_inv_eng, ring->funcs->vmhub); + } + + return 0; +} + +/** + * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ + * @adev: amdgpu_device pointer + * + * Check and set if the device @adev supports Trusted Memory + * Zones (TMZ). + */ +void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_RAVEN: + case CHIP_RENOIR: + if (amdgpu_tmz == 0) { + adev->gmc.tmz_enabled = false; + dev_info(adev->dev, + "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n"); + } else { + adev->gmc.tmz_enabled = true; + dev_info(adev->dev, + "Trusted Memory Zone (TMZ) feature enabled\n"); + } + break; + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: + case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: + /* Don't enable it by default yet. + */ + if (amdgpu_tmz < 1) { + adev->gmc.tmz_enabled = false; + dev_info(adev->dev, + "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n"); + } else { + adev->gmc.tmz_enabled = true; + dev_info(adev->dev, + "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n"); + } + break; + default: + adev->gmc.tmz_enabled = false; + dev_info(adev->dev, + "Trusted Memory Zone (TMZ) feature not supported\n"); + break; + } +} + +/** + * amdgpu_gmc_noretry_set -- set per asic noretry defaults + * @adev: amdgpu_device pointer + * + * Set a per asic default for the no-retry parameter. + * + */ +void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) +{ + struct amdgpu_gmc *gmc = &adev->gmc; + + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA20: + case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: + /* + * noretry = 0 will cause kfd page fault tests to fail + * for some ASICs, so set default to 1 for these ASICs. + */ + if (amdgpu_noretry == -1) + gmc->noretry = 1; + else + gmc->noretry = amdgpu_noretry; + break; + case CHIP_RAVEN: + default: + /* Raven currently has issues with noretry + * regardless of what we decide for other + * asics; we should leave raven with + * noretry = 0 until we root cause the + * issues. + * + * default this to 0 for now, but we may want + * to change this in the future for certain + * GPUs as it can increase performance in + * certain cases. + */ + if (amdgpu_noretry == -1) + gmc->noretry = 0; + else + gmc->noretry = amdgpu_noretry; + break; + } +} + +void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, + bool enable) +{ + struct amdgpu_vmhub *hub; + u32 tmp, reg, i; + + hub = &adev->vmhub[hub_type]; + for (i = 0; i < 16; i++) { + reg = hub->vm_context0_cntl + hub->ctx_distance * i; + + tmp = (hub_type == AMDGPU_GFXHUB_0) ? + RREG32_SOC15_IP(GC, reg) : + RREG32_SOC15_IP(MMHUB, reg); + + if (enable) + tmp |= hub->vm_cntx_cntl_vm_fault; + else + tmp &= ~hub->vm_cntx_cntl_vm_fault; + + (hub_type == AMDGPU_GFXHUB_0) ? + WREG32_SOC15_IP(GC, reg, tmp) : + WREG32_SOC15_IP(MMHUB, reg, tmp); + } +} + +void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) +{ + unsigned size; + + /* + * TODO: + * Currently there is a bug where some memory client outside + * of the driver writes to first 8M of VRAM on S3 resume, + * this overrides GART which by default gets placed in first 8M and + * causes VM_FAULTS once GTT is accessed. + * Keep the stolen memory reservation until this is solved.
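The reservation policy that follows boils down to a size split. A plain C sketch of the decision, with the VGA allocation constant and the VRAM sizes assumed only for illustration:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in; the real constant lives in the driver headers */
#define VBIOS_VGA_ALLOCATION (9ULL << 20)

int main(void)
{
    uint64_t real_vram_size = 4096ULL << 20;  /* assumed 4 GiB */
    uint64_t size = 12ULL << 20;              /* assumed pre-OS fb */
    uint64_t stolen_vga, stolen_extended;

    /* drop the reservation if the pre-OS buffer eats most of vram */
    if (real_vram_size - size < (8ULL << 20))
        size = 0;

    if (size > VBIOS_VGA_ALLOCATION) {
        stolen_vga = VBIOS_VGA_ALLOCATION;
        stolen_extended = size - stolen_vga;
    } else {
        stolen_vga = size;
        stolen_extended = 0;
    }
    printf("vga %llu MiB, extended %llu MiB\n",
           (unsigned long long)(stolen_vga >> 20),
           (unsigned long long)(stolen_extended >> 20));
    return 0;
}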
+ */ + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_RAVEN: + case CHIP_RENOIR: + adev->mman.keep_stolen_vga_memory = true; + break; + default: + adev->mman.keep_stolen_vga_memory = false; + break; + } + + if (amdgpu_sriov_vf(adev) || + !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { + size = 0; + } else { + size = amdgpu_gmc_get_vbios_fb_size(adev); + + if (adev->mman.keep_stolen_vga_memory) + size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); + } + + /* set to 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + size = 0; + + if (size > AMDGPU_VBIOS_VGA_ALLOCATION) { + adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION; + adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size; + } else { + adev->mman.stolen_vga_size = size; + adev->mman.stolen_extended_size = 0; + } +} + +/** + * amdgpu_gmc_init_pdb0 - initialize PDB0 + * + * @adev: amdgpu_device pointer + * + * This function is only used when GART page table is used + * for FB address translation. In such a case, we construct + * a 2-level system VM page table: PDB0->PTB, to cover both + * VRAM of the hive and system memory. + * + * PDB0 is static, initialized once on driver initialization. + * The first n entries of PDB0 are used as PTE by setting + * P bit to 1, pointing to VRAM. The n+1'th entry points + * to a big PTB covering system memory. + * + */ +void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) +{ + int i; + uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW? + /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M + */ + u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; + u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; + u64 vram_addr = adev->vm_manager.vram_base_offset - + adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + u64 vram_end = vram_addr + vram_size; + u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); + int idx; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return; + + flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; + flags |= AMDGPU_PTE_WRITEABLE; + flags |= AMDGPU_PTE_SNOOPED; + flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); + flags |= AMDGPU_PDE_PTE; + + /* The first n PDE0 entries are used as PTE, + * pointing to vram + */ + for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size) + amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags); + + /* The n+1'th PDE0 entry points to a huge + * PTB that has more than 512 entries, each + * pointing to a 4K system page + */ + flags = AMDGPU_PTE_VALID; + flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; + /* Requires gart_ptb_gpu_pa to be 4K aligned */ + amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); + drm_dev_exit(idx); +} + +/** + * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC + * address + * + * @adev: amdgpu_device pointer + * @mc_addr: MC address of buffer + */ +uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr) +{ + return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; +} + +/** + * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from + * GPU's view + * + * @adev: amdgpu_device pointer + * @bo: amdgpu buffer object + */ +uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) +{ + return amdgpu_gmc_vram_mc2pa(adev,
amdgpu_bo_gpu_offset(bo)); +} + +/** + * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address + * from CPU's view + * + * @adev: amdgpu_device pointer + * @bo: amdgpu buffer object + */ +uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) +{ + return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; +} + +void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev) +{ + /* Some ASICs need to reserve a region of video memory to avoid access + * from driver */ + adev->mman.stolen_reserved_offset = 0; + adev->mman.stolen_reserved_size = 0; + + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (amdgpu_discovery == 0) { + adev->mman.stolen_reserved_offset = 0x1ffb0000; + adev->mman.stolen_reserved_size = 64 * PAGE_SIZE; + } + break; + default: + break; + } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b499a3de8bb6..e55201134a01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -66,14 +66,20 @@ struct firmware; * GMC page fault information */ struct amdgpu_gmc_fault { - uint64_t timestamp; + uint64_t timestamp:48; uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER; - uint64_t key:52; + atomic64_t key; }; /* * VMHUB structures, functions & helpers */ +struct amdgpu_vmhub_funcs { + void (*print_l2_protection_fault_status)(struct amdgpu_device *adev, + uint32_t status); + uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type); +}; + struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; @@ -83,6 +89,19 @@ struct amdgpu_vmhub { uint32_t vm_context0_cntl; uint32_t vm_l2_pro_fault_status; uint32_t vm_l2_pro_fault_cntl; + + /* + * store the register distances between two continuous context domain + * and invalidation engine. + */ + uint32_t ctx_distance; + uint32_t ctx_addr_distance; /* include LO32/HI32 */ + uint32_t eng_distance; + uint32_t eng_addr_distance; /* include LO32/HI32 */ + + uint32_t vm_cntx_cntl_vm_fault; + + const struct amdgpu_vmhub_funcs *vmhub_funcs; }; /* @@ -92,6 +111,9 @@ struct amdgpu_gmc_funcs { /* flush the vm tlb via mmio */ void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type); + /* flush the vm tlb via pasid */ + int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid, + uint32_t flush_type, bool all_hub); /* flush the vm tlb via ring */ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); @@ -109,6 +131,16 @@ struct amdgpu_gmc_funcs { void (*get_vm_pte)(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, uint64_t *flags); + /* get the amount of memory used by the vbios for pre-OS console */ + unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); +}; + +struct amdgpu_xgmi_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); + int (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_xgmi { @@ -125,6 +157,9 @@ struct amdgpu_xgmi { struct list_head head; bool supported; struct ras_common_if *ras_if; + bool connected_to_cpu; + bool pending_reset; + const struct amdgpu_xgmi_ras_funcs *ras_funcs; }; struct amdgpu_gmc { @@ -165,10 +200,13 @@ struct amdgpu_gmc { u64 gart_end; /* Frame buffer aperture of this GPU device. 
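The two address helpers just above, amdgpu_gmc_vram_mc2pa() and amdgpu_gmc_vram_cpu_pa(), are linear translations. A small sketch with assumed aperture values makes the relationship explicit:

#include <assert.h>
#include <stdint.h>

/* GPU MC address to GPU physical address:
 * pa = mc_addr - vram_start + vram_base_offset */
static uint64_t vram_mc2pa(uint64_t mc_addr, uint64_t vram_start,
                           uint64_t vram_base_offset)
{
    return mc_addr - vram_start + vram_base_offset;
}

/* BO offset to CPU physical address through the PCI aperture:
 * cpu_pa = gpu_offset - vram_start + aper_base */
static uint64_t vram_cpu_pa(uint64_t gpu_offset, uint64_t vram_start,
                            uint64_t aper_base)
{
    return gpu_offset - vram_start + aper_base;
}

int main(void)
{
    /* assumed: vram at MC 0x8000000000, zero base offset,
     * BAR aperture at CPU physical 0xe0000000 */
    uint64_t vram_start = 0x8000000000ULL;
    uint64_t bo = vram_start + 0x100000;  /* BO 1 MiB into vram */

    assert(vram_mc2pa(bo, vram_start, 0) == 0x100000);
    assert(vram_cpu_pa(bo, vram_start, 0xe0000000ULL) ==
           0xe0000000ULL + 0x100000);
    return 0;
}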
Different from * fb_start (see below), this only covers the local GPU device. - * Driver get fb_start from MC_VM_FB_LOCATION_BASE (set by vbios) - * and calculate vram_start of this local device by adding an - * offset inside the XGMI hive. - * Under VMID0, logical address == MC address + * If the driver uses the FB aperture to access FB, it gets fb_start from + * MC_VM_FB_LOCATION_BASE (set by vbios) and calculates vram_start + * of this local device by adding an offset inside the XGMI hive. + * If the driver uses the GART table for VMID0 FB access, it finds a hole in + * VMID0's virtual address space to place the SYSVM aperture, inside + * which the first part is vram and the second part is gart (covering + * system ram). */ u64 vram_start; u64 vram_end; @@ -191,7 +229,7 @@ struct amdgpu_gmc { uint8_t vram_vendor; uint32_t srbm_soft_reset; bool prt_warning; - uint64_t stolen_size; + uint32_t sdpif_register; /* apertures */ u64 shared_aperture_start; u64 shared_aperture_end; @@ -209,18 +247,31 @@ struct amdgpu_gmc { } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE]; uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER; + bool tmz_enabled; + const struct amdgpu_gmc_funcs *gmc_funcs; struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; + int noretry; + + uint32_t vmid0_page_table_block_size; + uint32_t vmid0_page_table_depth; + struct amdgpu_bo *pdb0_bo; + /* CPU kmapped address of pdb0 */ + void *ptr_pdb0; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) +#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \ + ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \ + ((adev), (pasid), (type), (allhub))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) +#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev)) /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR @@ -250,6 +301,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) return addr; } +int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev); void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, @@ -257,6 +309,7 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, uint64_t flags); uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo); +void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, u64 base); void amdgpu_gmc_gart_location(struct amdgpu_device *adev, @@ -265,7 +318,24 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); +void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, + uint16_t pasid); int
amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); +int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); + +extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev); +extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev); + +extern void +amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, + bool enable); + +void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); +void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev); +void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); +uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); +uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); +uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 627104401e84..675a72ef305d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -22,19 +22,27 @@ * Authors: Christian König */ -#include "amdgpu.h" +#include <drm/ttm/ttm_range_manager.h> -struct amdgpu_gtt_mgr { - struct drm_mm mm; - spinlock_t lock; - atomic64_t available; -}; +#include "amdgpu.h" struct amdgpu_gtt_node { - struct drm_mm_node node; struct ttm_buffer_object *tbo; + struct ttm_range_mgr_node base; }; +static inline struct amdgpu_gtt_mgr * +to_gtt_mgr(struct ttm_resource_manager *man) +{ + return container_of(man, struct amdgpu_gtt_mgr, manager); +} + +static inline struct amdgpu_gtt_node * +to_amdgpu_gtt_node(struct ttm_resource *res) +{ + return container_of(res, struct amdgpu_gtt_node, base.base); +} + /** * DOC: mem_info_gtt_total * @@ -44,13 +52,15 @@ struct amdgpu_gtt_node { * the GTT block, in bytes */ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; - return snprintf(buf, PAGE_SIZE, "%llu\n", - (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE); + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE); } /** @@ -62,13 +72,15 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, * size of the GTT block, in bytes */ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; - return snprintf(buf, PAGE_SIZE, "%llu\n", - amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT])); + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man)); } static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, @@ -76,134 +88,28 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO, amdgpu_mem_info_gtt_used_show, NULL); -/** - * amdgpu_gtt_mgr_init - init GTT manager and DRM MM - * - * @man: TTM memory type manager - * @p_size: maximum size of GTT - * - * Allocate and initialize the GTT manager. 
- */ -static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_gtt_mgr *mgr; - uint64_t start, size; - int ret; - - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) - return -ENOMEM; - - start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; - size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; - drm_mm_init(&mgr->mm, start, size); - spin_lock_init(&mgr->lock); - atomic64_set(&mgr->available, p_size); - man->priv = mgr; - - ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_gtt_total\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_gtt_used\n"); - return ret; - } - - return 0; -} - -/** - * amdgpu_gtt_mgr_fini - free and destroy GTT manager - * - * @man: TTM memory type manager - * - * Destroy and free the GTT manager, returns -EBUSY if ranges are still - * allocated inside it. - */ -static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_gtt_mgr *mgr = man->priv; - spin_lock(&mgr->lock); - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); - kfree(mgr); - man->priv = NULL; - - device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total); - device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used); +static struct attribute *amdgpu_gtt_mgr_attributes[] = { + &dev_attr_mem_info_gtt_total.attr, + &dev_attr_mem_info_gtt_used.attr, + NULL +}; - return 0; -} +const struct attribute_group amdgpu_gtt_mgr_attr_group = { + .attrs = amdgpu_gtt_mgr_attributes +}; /** * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space * - * @mem: the mem object to check + * @res: the mem object to check * * Check if a mem object has already address space allocated. */ -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res) { - struct amdgpu_gtt_node *node = mem->mm_node; + struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); - return (node->node.start != AMDGPU_BO_INVALID_OFFSET); -} - -/** - * amdgpu_gtt_mgr_alloc - allocate new ranges - * - * @man: TTM memory type manager - * @tbo: TTM BO we need this range for - * @place: placement flags and restrictions - * @mem: the resulting mem object - * - * Allocate the address space for a node. 
- */ -static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *tbo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_gtt_mgr *mgr = man->priv; - struct amdgpu_gtt_node *node = mem->mm_node; - enum drm_mm_insert_mode mode; - unsigned long fpfn, lpfn; - int r; - - if (amdgpu_gtt_mgr_has_gart_addr(mem)) - return 0; - - if (place) - fpfn = place->fpfn; - else - fpfn = 0; - - if (place && place->lpfn) - lpfn = place->lpfn; - else - lpfn = adev->gart.num_cpu_pages; - - mode = DRM_MM_INSERT_BEST; - if (place && place->flags & TTM_PL_FLAG_TOPDOWN) - mode = DRM_MM_INSERT_HIGH; - - spin_lock(&mgr->lock); - r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages, - mem->page_alignment, 0, fpfn, lpfn, - mode); - spin_unlock(&mgr->lock); - - if (!r) - mem->start = node->node.start; - - return r; + return drm_mm_node_allocated(&node->base.mm_nodes[0]); } /** @@ -212,54 +118,62 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, * @man: TTM memory type manager * @tbo: TTM BO we need this range for * @place: placement flags and restrictions - * @mem: the resulting mem object + * @res: the resulting mem object * * Dummy, allocate the node but no space for it yet. */ -static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource **res) { - struct amdgpu_gtt_mgr *mgr = man->priv; + struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); + uint32_t num_pages = PFN_UP(tbo->base.size); struct amdgpu_gtt_node *node; int r; - spin_lock(&mgr->lock); - if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && - atomic64_read(&mgr->available) < mem->num_pages) { - spin_unlock(&mgr->lock); - return 0; + if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && + atomic64_add_return(num_pages, &mgr->used) > man->size) { + atomic64_sub(num_pages, &mgr->used); + return -ENOSPC; } - atomic64_sub(mem->num_pages, &mgr->available); - spin_unlock(&mgr->lock); - node = kzalloc(sizeof(*node), GFP_KERNEL); + node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); if (!node) { r = -ENOMEM; goto err_out; } - node->node.start = AMDGPU_BO_INVALID_OFFSET; - node->node.size = mem->num_pages; node->tbo = tbo; - mem->mm_node = node; - - if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { - r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem); - if (unlikely(r)) { - kfree(node); - mem->mm_node = NULL; - r = 0; - goto err_out; - } + ttm_resource_init(tbo, place, &node->base.base); + + if (place->lpfn) { + spin_lock(&mgr->lock); + r = drm_mm_insert_node_in_range(&mgr->mm, + &node->base.mm_nodes[0], + num_pages, tbo->page_alignment, + 0, place->fpfn, place->lpfn, + DRM_MM_INSERT_BEST); + spin_unlock(&mgr->lock); + if (unlikely(r)) + goto err_free; + + node->base.base.start = node->base.mm_nodes[0].start; } else { - mem->start = node->node.start; + node->base.mm_nodes[0].start = 0; + node->base.mm_nodes[0].size = node->base.base.num_pages; + node->base.base.start = AMDGPU_BO_INVALID_OFFSET; } + *res = &node->base.base; return 0; + +err_free: + kfree(node); + err_out: - atomic64_add(mem->num_pages, &mgr->available); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(num_pages, &mgr->used); return r; } @@ -268,29 +182,25 @@ err_out: * amdgpu_gtt_mgr_del - free ranges * * @man: TTM memory type 
manager - * @tbo: TTM BO we need this range for - * @place: placement flags and restrictions - * @mem: TTM memory object + * @res: TTM memory object * * Free the allocated GTT again. */ -static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) +static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, + struct ttm_resource *res) { - struct amdgpu_gtt_mgr *mgr = man->priv; - struct amdgpu_gtt_node *node = mem->mm_node; - - if (!node) - return; + struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); + struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); spin_lock(&mgr->lock); - if (node->node.start != AMDGPU_BO_INVALID_OFFSET) - drm_mm_remove_node(&node->node); + if (drm_mm_node_allocated(&node->base.mm_nodes[0])) + drm_mm_remove_node(&node->base.mm_nodes[0]); spin_unlock(&mgr->lock); - atomic64_add(mem->num_pages, &mgr->available); + + if (!(res->placement & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(res->num_pages, &mgr->used); kfree(node); - mem->mm_node = NULL; } /** @@ -300,30 +210,40 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { - struct amdgpu_gtt_mgr *mgr = man->priv; - s64 result = man->size - atomic64_read(&mgr->available); + struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - return (result > 0 ? result : 0) * PAGE_SIZE; + return atomic64_read(&mgr->used) * PAGE_SIZE; } -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) +/** + * amdgpu_gtt_mgr_recover - re-init gart + * + * @man: TTM memory type manager + * + * Re-init the gart for each known BO in the GTT. + */ +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) { - struct amdgpu_gtt_mgr *mgr = man->priv; + struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); + struct amdgpu_device *adev; struct amdgpu_gtt_node *node; struct drm_mm_node *mm_node; int r = 0; + adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); spin_lock(&mgr->lock); drm_mm_for_each_node(mm_node, &mgr->mm) { - node = container_of(mm_node, struct amdgpu_gtt_node, node); + node = container_of(mm_node, typeof(*node), base.mm_nodes[0]); r = amdgpu_ttm_recover_gart(node->tbo); if (r) break; } spin_unlock(&mgr->lock); + amdgpu_gart_invalidate_tlb(adev); + return r; } @@ -335,24 +255,79 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
*/ -static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { - struct amdgpu_gtt_mgr *mgr = man->priv; + struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); spin_lock(&mgr->lock); drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); - drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n", - man->size, (u64)atomic64_read(&mgr->available), - amdgpu_gtt_mgr_usage(man) >> 20); + drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n", + man->size, atomic64_read(&mgr->used)); } -const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { - .init = amdgpu_gtt_mgr_init, - .takedown = amdgpu_gtt_mgr_fini, - .get_node = amdgpu_gtt_mgr_new, - .put_node = amdgpu_gtt_mgr_del, +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { + .alloc = amdgpu_gtt_mgr_new, + .free = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug }; + +/** + * amdgpu_gtt_mgr_init - init GTT manager and DRM MM + * + * @adev: amdgpu_device pointer + * @gtt_size: maximum size of GTT + * + * Allocate and initialize the GTT manager. + */ +int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) +{ + struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + uint64_t start, size; + + man->use_tt = true; + man->func = &amdgpu_gtt_mgr_func; + + ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); + + start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; + drm_mm_init(&mgr->mm, start, size); + spin_lock_init(&mgr->lock); + atomic64_set(&mgr->used, 0); + + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_gtt_mgr_fini - free and destroy GTT manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the GTT manager, returns -EBUSY if ranges are still + * allocated inside it. + */ +void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + spin_lock(&mgr->lock); + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c new file mode 100644 index 000000000000..a766e1aad2b9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -0,0 +1,68 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "hdp_err_count", + }; + + if (!adev->hdp.ras_if) { + adev->hdp.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->hdp.ras_if) + return -ENOMEM; + adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP; + adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->hdp.ras_if->sub_block_index = 0; + } + ih_info.head = fs_info.head = *adev->hdp.ras_if; + r = amdgpu_ras_late_init(adev, adev->hdp.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->hdp.ras_if->block)) { + kfree(adev->hdp.ras_if); + adev->hdp.ras_if = NULL; + } + + return r; +} + +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) && + adev->hdp.ras_if) { + struct ras_common_if *ras_if = adev->hdp.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h new file mode 100644 index 000000000000..7ec99d591584 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -0,0 +1,51 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __AMDGPU_HDP_H__ +#define __AMDGPU_HDP_H__ + +struct amdgpu_hdp_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); +}; + +struct amdgpu_hdp_funcs { + void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + void (*invalidate_hdp)(struct amdgpu_device *adev, + struct amdgpu_ring *ring); + void (*update_clock_gating)(struct amdgpu_device *adev, bool enable); + void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags); + void (*init_registers)(struct amdgpu_device *adev); +}; + +struct amdgpu_hdp { + struct ras_common_if *ras_if; + const struct amdgpu_hdp_funcs *funcs; + const struct amdgpu_hdp_ras_funcs *ras_funcs; +}; + +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev); +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev); +#endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 70dbe343f51d..82608df43396 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -40,7 +40,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap) { struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; @@ -82,7 +82,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap) static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap) { struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; @@ -101,7 +101,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap) static int amdgpu_i2c_get_clock(void *i2c_priv) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -116,7 +116,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv) static int amdgpu_i2c_get_data(void *i2c_priv) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -130,7 +130,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv) static void amdgpu_i2c_set_clock(void *i2c_priv, int clock) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -143,7 +143,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock) static void amdgpu_i2c_set_data(void *i2c_priv, int data) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; 
i2c_set_adapdata(&i2c->adapter, i2c); mutex_init(&i2c->mutex); @@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev, const struct amdgpu_i2c_bus_rec *rec, const char *name) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); int i; for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { @@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, void amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector) { - u8 val; + u8 val = 0; if (!amdgpu_connector->router.ddc_valid) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 60655834d649..bc1297dcdf97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -30,7 +30,6 @@ #include <linux/slab.h> #include <drm/amdgpu_drm.h> -#include <drm/drm_debugfs.h> #include "amdgpu.h" #include "atom.h" @@ -48,13 +47,14 @@ * produce command buffers which are sent to the kernel and * put in IBs for execution by the requested ring. */ -static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); /** * amdgpu_ib_get - request an IB (Indirect Buffer) * - * @ring: ring index the IB is associated with + * @adev: amdgpu_device pointer + * @vm: amdgpu_vm pointer * @size: requested IB size + * @pool_type: IB pool type (delayed, immediate, direct) * @ib: IB object returned * * Request an IB (all asics). IBs are allocated using the @@ -62,12 +62,13 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev); * Returns 0 on success, error on failure. */ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, struct amdgpu_ib *ib) + unsigned size, enum amdgpu_ib_pool_type pool_type, + struct amdgpu_ib *ib) { int r; if (size) { - r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, + r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type], &ib->sa_bo, size, 256); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); @@ -75,6 +76,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, } ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo); + /* flush the cache before committing the IB */ + ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC; if (!vm) ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); @@ -101,9 +104,10 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, /** * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring * - * @adev: amdgpu_device pointer + * @ring: ring index the IB is associated with * @num_ibs: number of IBs to schedule * @ibs: IB objects to schedule + * @job: job to schedule * @f: fence created during this submission * * Schedule an IB on the associated ring (all asics).
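[Editor's note, not part of the patch: after the amdgpu_ib_get() signature change above, every caller must name the suballocator pool it draws from. A hedged sketch of a caller under the new API, using the AMDGPU_IB_POOL_DELAYED enumerator that the debugfs hunk further down prints; the surrounding job setup is elided:

struct amdgpu_ib ib;
int r;

/* Request a 256-byte IB from the delayed pool. With a NULL vm the
 * GPU address is taken straight from the suballocation. */
r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
if (r)
	return r;

/* ... write packets to ib.ptr, schedule, then release ... */
amdgpu_ib_free(adev, &ib, NULL);

Note the side effect visible in the hunk above: amdgpu_ib_get() now pre-sets AMDGPU_IB_FLAG_EMIT_MEM_SYNC, so a cache flush is emitted at submission unless the caller overwrites the flags.]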
@@ -126,12 +130,13 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; - bool skip_preamble, need_ctx_switch; + bool need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; uint64_t fence_ctx; uint32_t status = 0, alloc_size; unsigned fence_flags = 0; + bool secure; unsigned i; int r = 0; @@ -160,6 +165,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return -EINVAL; } + if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) && + (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) { + dev_err(adev->dev, "secure submissions not supported on compute rings\n"); + return -EINVAL; + } + alloc_size = ring->funcs->emit_frame_size + num_ibs * ring->funcs->emit_ib_size; @@ -171,7 +182,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, need_ctx_switch = ring->current_ctx != fence_ctx; if (ring->funcs->emit_pipeline_sync && job && - ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || + ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; @@ -182,6 +193,13 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, dma_fence_put(tmp); } + if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync) + ring->funcs->emit_mem_sync(ring); + + if (ring->funcs->emit_wave_limit && + ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH) + ring->funcs->emit_wave_limit(ring, true); + if (ring->funcs->insert_start) ring->funcs->insert_start(ring); @@ -196,48 +214,44 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (job && ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); -#ifdef CONFIG_X86_64 - if (!(adev->flags & AMD_IS_APU)) -#endif - { - if (ring->funcs->emit_hdp_flush) - amdgpu_ring_emit_hdp_flush(ring); - else - amdgpu_asic_flush_hdp(adev, ring); - } + amdgpu_device_flush_hdp(adev, ring); if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; - skip_preamble = ring->current_ctx == fence_ctx; if (job && ring->funcs->emit_cntxcntl) { status |= job->preamble_status; status |= job->preemption_status; amdgpu_ring_emit_cntxcntl(ring, status); } + /* Setup initial TMZiness and send it off. 
+ */ + secure = false; + if (job && ring->funcs->emit_frame_cntl) { + secure = ib->flags & AMDGPU_IB_FLAGS_SECURE; + amdgpu_ring_emit_frame_cntl(ring, true, secure); + } + for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - /* drop preamble IBs if we don't have a context switch */ - if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - skip_preamble && - !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) && - !amdgpu_mcbp && - !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ - continue; + if (job && ring->funcs->emit_frame_cntl) { + if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) { + amdgpu_ring_emit_frame_cntl(ring, false, secure); + secure = !secure; + amdgpu_ring_emit_frame_cntl(ring, true, secure); + } + } amdgpu_ring_emit_ib(ring, job, ib, status); status &= ~AMDGPU_HAVE_CTX_SWITCH; } - if (ring->funcs->emit_tmz) - amdgpu_ring_emit_tmz(ring, false); + if (job && ring->funcs->emit_frame_cntl) + amdgpu_ring_emit_frame_cntl(ring, false, secure); -#ifdef CONFIG_X86_64 - if (!(adev->flags & AMD_IS_APU)) -#endif - amdgpu_asic_invalidate_hdp(adev, ring); + amdgpu_device_invalidate_hdp(adev, ring); if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; @@ -248,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, fence_flags | AMDGPU_FENCE_FLAG_64BIT); } - r = amdgpu_fence_emit(ring, f, fence_flags); + r = amdgpu_fence_emit(ring, f, job, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -266,6 +280,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, ring->current_ctx = fence_ctx; if (vm && ring->funcs->emit_switch_buffer) amdgpu_ring_emit_switch_buffer(ring); + + if (ring->funcs->emit_wave_limit && + ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH) + ring->funcs->emit_wave_limit(ring, false); + amdgpu_ring_commit(ring); return 0; } @@ -281,24 +300,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, */ int amdgpu_ib_pool_init(struct amdgpu_device *adev) { - int r; + int r, i; - if (adev->ib_pool_ready) { + if (adev->ib_pool_ready) return 0; - } - r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo, - AMDGPU_IB_POOL_SIZE*64*1024, - AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT); - if (r) { - return r; - } - adev->ib_pool_ready = true; - if (amdgpu_debugfs_sa_init(adev)) { - dev_err(adev->dev, "failed to register debugfs file for SA\n"); + for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { + r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], + AMDGPU_IB_POOL_SIZE, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT); + if (r) + goto error; } + adev->ib_pool_ready = true; + return 0; + +error: + while (i--) + amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]); + return r; } /** @@ -311,10 +333,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) */ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) { - if (adev->ib_pool_ready) { - amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo); - adev->ib_pool_ready = false; - } + int i; + + if (!adev->ib_pool_ready) + return; + + for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) + amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]); + adev->ib_pool_ready = false; } /** @@ -329,9 +355,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) */ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { - unsigned i; - int r, ret = 0; long tmo_gfx, tmo_mm; + int r, ret = 0; + unsigned i; tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; if (amdgpu_sriov_vf(adev)) { @@ -403,29 +429,34 @@ int 
amdgpu_ib_ring_tests(struct amdgpu_device *adev) */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data) +static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m); + seq_printf(m, "--------------------- DELAYED --------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED], + m); + seq_printf(m, "-------------------- IMMEDIATE -------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE], + m); + seq_printf(m, "--------------------- DIRECT ---------------------- \n"); + amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m); return 0; - } -static const struct drm_info_list amdgpu_debugfs_sa_list[] = { - {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info); #endif -static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev) +void amdgpu_debugfs_sa_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1); -#else - return 0; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_sa_info", 0444, root, adev, + &amdgpu_debugfs_sa_info_fops); + #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 6f9289735e31..b7fb72bff2c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -43,7 +43,7 @@ static DEFINE_IDA(amdgpu_pasid_ida); /* Helper to free pasid from a fence callback */ struct amdgpu_pasid_cb { struct dma_fence_cb cb; - unsigned int pasid; + u32 pasid; }; /** @@ -79,7 +79,7 @@ int amdgpu_pasid_alloc(unsigned int bits) * amdgpu_pasid_free - Free a PASID * @pasid: PASID to free */ -void amdgpu_pasid_free(unsigned int pasid) +void amdgpu_pasid_free(u32 pasid) { trace_amdgpu_pasid_freed(pasid); ida_simple_remove(&amdgpu_pasid_ida, pasid); @@ -105,14 +105,14 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence, * Free the pasid only after all the fences in resv are signaled. */ void amdgpu_pasid_free_delayed(struct dma_resv *resv, - unsigned int pasid) + u32 pasid) { struct dma_fence *fence, **fences; struct amdgpu_pasid_cb *cb; unsigned count; int r; - r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); + r = dma_resv_get_fences(resv, NULL, &count, &fences); if (r) goto fallback; @@ -156,8 +156,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout_rcu(resv, true, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } @@ -183,7 +182,7 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, } /** - * amdgpu_vm_grab_idle - grab idle VMID + * amdgpu_vmid_grab_idle - grab idle VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -206,16 +205,20 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, int r; if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait)) - return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false); + return amdgpu_sync_fence(sync, ring->vmid_wait); - fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); + fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); if (!fences) return -ENOMEM; /* Check if we have an idle VMID */ i = 0; list_for_each_entry((*idle), &id_mgr->ids_lru, list) { - fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring); + /* Don't use per engine and per process VMID at the same time */ + struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ? + NULL : ring; + + fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); if (!fences[i]) break; ++i; @@ -241,7 +244,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, return -ENOMEM; } - r = amdgpu_sync_fence(adev, sync, &array->base, false); + r = amdgpu_sync_fence(sync, &array->base); dma_fence_put(ring->vmid_wait); ring->vmid_wait = &array->base; return r; @@ -252,13 +255,14 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_reserved - try to assign reserved VMID + * amdgpu_vmid_grab_reserved - try to assign reserved VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to * @sync: sync object where we add dependencies * @fence: fence protecting ID from reuse * @job: job who wants to use the VMID + * @id: resulting VMID * * Try to assign a reserved VMID. */ @@ -280,21 +284,25 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, if (updates && (*id)->flushed_updates && updates->context == (*id)->flushed_updates->context && !dma_fence_is_later(updates, (*id)->flushed_updates)) - updates = NULL; + updates = NULL; - if ((*id)->owner != vm->direct.fence_context || + if ((*id)->owner != vm->immediate.fence_context || job->vm_pd_addr != (*id)->pd_gpu_addr || updates || !(*id)->last_flush || ((*id)->last_flush->context != fence_context && !dma_fence_is_signaled((*id)->last_flush))) { struct dma_fence *tmp; + /* Don't use per engine and per process VMID at the same time */ + if (adev->vm_manager.concurrent_flush) + ring = NULL; + /* to prevent one context starved by another context */ (*id)->pd_gpu_addr = 0; tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); if (tmp) { *id = NULL; - r = amdgpu_sync_fence(adev, sync, tmp, false); + r = amdgpu_sync_fence(sync, tmp); return r; } needs_flush = true; @@ -303,7 +311,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. 
*/ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence); if (r) return r; @@ -316,7 +324,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_used - try to reuse a VMID + * amdgpu_vmid_grab_used - try to reuse a VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -349,7 +357,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct dma_fence *flushed; /* Check all the prerequisites to using this VMID */ - if ((*id)->owner != vm->direct.fence_context) + if ((*id)->owner != vm->immediate.fence_context) continue; if ((*id)->pd_gpu_addr != job->vm_pd_addr) @@ -364,18 +372,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, if (updates && (!flushed || dma_fence_is_later(updates, flushed))) needs_flush = true; - /* Concurrent flushes are only possible starting with Vega10 and - * are broken on Navi10 and Navi14. - */ - if (needs_flush && (adev->asic_type < CHIP_VEGA10 || - adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14)) + if (needs_flush && !adev->vm_manager.concurrent_flush) continue; /* Good, we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence); if (r) return r; @@ -393,7 +396,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_id - allocate the next free VMID + * amdgpu_vmid_grab - allocate the next free VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -435,8 +438,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id = idle; /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, - fence, false); + r = amdgpu_sync_fence(&id->active, fence); if (r) goto error; @@ -449,7 +451,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } id->pd_gpu_addr = job->vm_pd_addr; - id->owner = vm->direct.fence_context; + id->owner = vm->immediate.fence_context; if (job->vm_needs_flush) { dma_fence_put(id->last_flush); @@ -515,6 +517,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, * amdgpu_vmid_reset - reset VMID to zero * * @adev: amdgpu device structure + * @vmhub: vmhub type * @vmid: vmid number to use * * Reset saved GDW, GWS and OA to force switch on next flush. 
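[Editor's note, not part of the patch: the asic check deleted in the hunks above ("Concurrent flushes are only possible starting with Vega10 and are broken on Navi10 and Navi14") is folded into a single adev->vm_manager.concurrent_flush flag that the VMID-grab paths now test. A hedged sketch of an initialization consistent with the removed logic; the actual assignment happens elsewhere in the series:

/* Hypothetical init site, mirroring the check this patch removes. */
adev->vm_manager.concurrent_flush =
	!(adev->asic_type < CHIP_VEGA10 ||
	  adev->asic_type == CHIP_NAVI10 ||
	  adev->asic_type == CHIP_NAVI14);

]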
@@ -575,6 +578,9 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) INIT_LIST_HEAD(&id_mgr->ids_lru); atomic_set(&id_mgr->reserved_vmid_num, 0); + /* manage only VMIDs not used by KFD */ + id_mgr->num_ids = adev->vm_manager.first_kfd_vmid; + /* skip over VMID 0, since it is the system VM */ for (j = 1; j < id_mgr->num_ids; ++j) { amdgpu_vmid_reset(adev, i, j); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index 8e58325bbca2..0c3b4fa1f936 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -71,9 +71,9 @@ struct amdgpu_vmid_mgr { }; int amdgpu_pasid_alloc(unsigned int bits); -void amdgpu_pasid_free(unsigned int pasid); +void amdgpu_pasid_free(u32 pasid); void amdgpu_pasid_free_delayed(struct dma_resv *resv, - unsigned int pasid); + u32 pasid); bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 6d8f05511aba..f3d62e196901 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, if (ih->ring == NULL) return -ENOMEM; - memset((void *)ih->ring, 0, ih->ring_size + 8); ih->gpu_addr = dma_addr; ih->wptr_addr = dma_addr + ih->ring_size; ih->wptr_cpu = &ih->ring[ih->ring_size / 4]; @@ -100,6 +99,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4; ih->rptr_cpu = &adev->wb.wb[rptr_offs]; } + + init_waitqueue_head(&ih->wait_process); return 0; } @@ -114,9 +115,11 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, */ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + + if (!ih->ring) + return; + if (ih->use_bus_addr) { - if (!ih->ring) - return; /* add 8 bytes for the rptr/wptr shadows and * add them to the end of the ring allocation. @@ -133,6 +136,83 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) } /** + * amdgpu_ih_ring_write - write IV to the ring buffer + * + * @ih: ih ring to write to + * @iv: the iv to write + * @num_dw: size of the iv in dw + * + * Writes an IV to the ring buffer using the CPU and increment the wptr. + * Used for testing and delegating IVs to a software ring. + */ +void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, + unsigned int num_dw) +{ + uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2; + unsigned int i; + + for (i = 0; i < num_dw; ++i) + ih->ring[wptr++] = cpu_to_le32(iv[i]); + + wptr <<= 2; + wptr &= ih->ptr_mask; + + /* Only commit the new wptr if we don't overflow */ + if (wptr != READ_ONCE(ih->rptr)) { + wmb(); + WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr)); + } +} + +/* Waiter helper that checks current rptr matches or passes checkpoint wptr */ +static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + uint32_t checkpoint_wptr, + uint32_t *prev_rptr) +{ + uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask); + + /* rptr has wrapped. 
*/ + if (cur_rptr < *prev_rptr) + cur_rptr += ih->ptr_mask + 1; + *prev_rptr = cur_rptr; + + /* check ring is empty to work around missing wptr overflow flag */ + return cur_rptr >= checkpoint_wptr || + (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih); +} + +/** + * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint + * + * @adev: amdgpu_device pointer + * @ih: ih ring to process + * + * Used to ensure ring has processed IVs up to the checkpoint write pointer. + */ +int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t checkpoint_wptr, rptr; + + if (!ih->enabled || adev->shutdown) + return -ENODEV; + + checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih); + /* Order wptr with rptr. */ + rmb(); + rptr = READ_ONCE(ih->rptr); + + /* wptr has wrapped. */ + if (rptr > checkpoint_wptr) + checkpoint_wptr += ih->ptr_mask + 1; + + return wait_event_interruptible(ih->wait_process, + amdgpu_ih_has_checkpoint_processed(adev, ih, + checkpoint_wptr, &rptr)); +} + +/** * amdgpu_ih_process - interrupt handler * * @adev: amdgpu_device pointer @@ -152,10 +232,6 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: - /* is somebody else already processing irqs? */ - if (atomic_xchg(&ih->lock, 1)) - return IRQ_NONE; - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ @@ -167,7 +243,7 @@ restart_ih: } amdgpu_ih_set_rptr(adev, ih); - atomic_set(&ih->lock, 0); + wake_up_all(&ih->wait_process); /* make sure wptr hasn't changed while processing */ wptr = amdgpu_ih_get_wptr(adev, ih); @@ -177,3 +253,48 @@ restart_ih: return IRQ_HANDLED; } +/** + * amdgpu_ih_decode_iv_helper - decode an interrupt vector + * + * @adev: amdgpu_device pointer + * @ih: ih ring to process + * @entry: IV entry + * + * Decodes the interrupt vector at the current rptr + * position and also advances the position for Vega10 + * and later GPUs. + */ +void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry) +{ + /* wptr/rptr are in bytes! */ + u32 ring_index = ih->rptr >> 2; + uint32_t dw[8]; + + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); + dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); + dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); + dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); + dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); + + entry->client_id = dw[0] & 0xff; + entry->src_id = (dw[0] >> 8) & 0xff; + entry->ring_id = (dw[0] >> 16) & 0xff; + entry->vmid = (dw[0] >> 24) & 0xf; + entry->vmid_src = (dw[0] >> 31); + entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); + entry->timestamp_src = dw[2] >> 31; + entry->pasid = dw[3] & 0xffff; + entry->pasid_src = dw[3] >> 31; + entry->src_data[0] = dw[4]; + entry->src_data[1] = dw[5]; + entry->src_data[2] = dw[6]; + entry->src_data[3] = dw[7]; + + /* wptr/rptr are in bytes!
*/ + ih->rptr += 32; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 4e0bb645176d..0649b59830a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -30,6 +30,18 @@ struct amdgpu_device; struct amdgpu_iv_entry; +struct amdgpu_ih_regs { + uint32_t ih_rb_base; + uint32_t ih_rb_base_hi; + uint32_t ih_rb_cntl; + uint32_t ih_rb_wptr; + uint32_t ih_rb_rptr; + uint32_t ih_doorbell_rptr; + uint32_t ih_rb_wptr_addr_lo; + uint32_t ih_rb_wptr_addr_hi; + uint32_t psp_reg_id; +}; + /* * R6xx+ IH ring */ @@ -52,7 +64,10 @@ struct amdgpu_ih_ring { bool enabled; unsigned rptr; - atomic_t lock; + struct amdgpu_ih_regs ih_regs; + + /* For waiting on IH processing at checkpoint. */ + wait_queue_head_t wait_process; }; /* provided by the ih block */ @@ -72,6 +87,12 @@ struct amdgpu_ih_funcs { int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); +void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, + unsigned int num_dw); +int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih); int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); - +void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 30d540d23b77..cc2e0c9cfe0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -46,15 +46,16 @@ #include <linux/pci.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_irq.h> #include <drm/drm_vblank.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_ih.h" #include "atom.h" #include "amdgpu_connectors.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" #include <linux/pm_runtime.h> @@ -64,6 +65,41 @@ #define AMDGPU_WAIT_IDLE_TIMEOUT 200 +const char *soc15_ih_clientid_name[] = { + "IH", + "SDMA2 or ACP", + "ATHUB", + "BIF", + "SDMA3 or DCE", + "SDMA4 or ISP", + "VMC1 or PCIE0", + "RLC", + "SDMA0", + "SDMA1", + "SE0SH", + "SE1SH", + "SE2SH", + "SE3SH", + "VCN1 or UVD1", + "THM", + "VCN or UVD", + "SDMA5 or VCE0", + "VMC", + "SDMA6 or XDMA", + "GRBM_CP", + "ATS", + "ROM_SMUIO", + "DF", + "SDMA7 or VCE1", + "PWR", + "reserved", + "UTCL2", + "EA", + "UTCL2LOG", + "MP0", + "MP1" +}; + /** * amdgpu_hotplug_work_func - work handler for display hotplug event * @@ -84,7 +120,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, hotplug_work); - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; struct drm_connector_list_iter iter; @@ -147,10 +183,10 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) * Returns: * result of handling the IRQ, as defined by &irqreturn_t */ -irqreturn_t amdgpu_irq_handler(int irq, void *arg) +static irqreturn_t amdgpu_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); irqreturn_t ret; ret = amdgpu_ih_process(adev, &adev->irq.ih); @@ -162,13 
+198,15 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) * register to check whether the interrupt is triggered or not, and properly * ack the interrupt if it is there */ - if (adev->nbio.funcs && - adev->nbio.funcs->handle_ras_controller_intr_no_bifring) - adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); - - if (adev->nbio.funcs && - adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) - adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) { + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring) + adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev); + + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + } return ret; } @@ -204,6 +242,21 @@ static void amdgpu_irq_handle_ih2(struct work_struct *work) } /** + * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft + * + * @work: work structure in struct amdgpu_irq + * + * Kick off processing of the IH soft ring. + */ +static void amdgpu_irq_handle_ih_soft(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, + irq.ih_soft_work); + + amdgpu_ih_process(adev, &adev->irq.ih_soft); +} + +/** * amdgpu_msi_ok - check whether MSI functionality is enabled * * @adev: amdgpu device pointer (unused) @@ -224,6 +277,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } +static void amdgpu_restore_msix(struct amdgpu_device *adev) +{ + u16 ctrl; + + pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + if (!(ctrl & PCI_MSIX_FLAGS_ENABLE)) + return; + + /* VF FLR */ + ctrl &= ~PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); + ctrl |= PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + /** * amdgpu_irq_init - initialize interrupt handling * @@ -238,6 +306,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) int amdgpu_irq_init(struct amdgpu_device *adev) { int r = 0; + unsigned int irq; spin_lock_init(&adev->irq.lock); @@ -257,7 +326,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); if (nvec > 0) { adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n"); + dev_dbg(adev->dev, "using MSI/MSI-X.\n"); } } @@ -265,9 +334,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ /* XXX: can this be enabled for DC? */ - adev->ddev->vblank_disable_immediate = true; + adev_to_drm(adev)->vblank_disable_immediate = true; - r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); + r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); if (r) return r; @@ -278,22 +347,49 @@ int amdgpu_irq_init(struct amdgpu_device *adev) INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); + INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft); - adev->irq.installed = true; - /* Use vector 0 for MSI-X */ - r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0)); + /* Use vector 0 for MSI-X. */ + r = pci_irq_vector(adev->pdev, 0); + if (r < 0) + return r; + irq = r; + + /* PCI devices require shared interrupts.
*/ + r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name, + adev_to_drm(adev)); if (r) { - adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); return r; } - adev->ddev->max_vblank_count = 0x00ffffff; + adev->irq.installed = true; + adev->irq.irq = irq; + adev_to_drm(adev)->max_vblank_count = 0x00ffffff; DRM_DEBUG("amdgpu: irq initialized.\n"); return 0; } + +void amdgpu_irq_fini_hw(struct amdgpu_device *adev) +{ + if (adev->irq.installed) { + free_irq(adev->irq.irq, adev_to_drm(adev)); + adev->irq.installed = false; + if (adev->irq.msi_enabled) + pci_free_irq_vectors(adev->pdev); + + if (!amdgpu_device_has_dc_support(adev)) + flush_work(&adev->hotplug_work); + } + + amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_ih_ring_fini(adev, &adev->irq.ih1); + amdgpu_ih_ring_fini(adev, &adev->irq.ih2); +} + /** * amdgpu_irq_fini - shut down interrupt handling * @@ -303,19 +399,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev) * functionality, shuts down vblank, hotplug and reset interrupt handling, * turns off interrupts from all sources (all ASICs). */ -void amdgpu_irq_fini(struct amdgpu_device *adev) +void amdgpu_irq_fini_sw(struct amdgpu_device *adev) { unsigned i, j; - if (adev->irq.installed) { - drm_irq_uninstall(adev->ddev); - adev->irq.installed = false; - if (adev->irq.msi_enabled) - pci_free_irq_vectors(adev->pdev); - if (!amdgpu_device_has_dc_support(adev)) - flush_work(&adev->hotplug_work); - } - for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; @@ -328,11 +415,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) kfree(src->enabled_types); src->enabled_types = NULL; - if (src->data) { - kfree(src->data); - kfree(src); - adev->irq.client[i].sources[j] = NULL; - } } kfree(adev->irq.client[i].sources); adev->irq.client[i].sources = NULL; @@ -410,6 +492,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, bool handled = false; int r; + entry.ih = ih; entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; amdgpu_ih_decode_iv(adev, &entry); @@ -424,8 +507,9 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); - } else if (adev->irq.virq[src_id]) { - generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); + } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) && + adev->irq.virq[src_id]) { + generic_handle_domain_irq(adev->irq.domain, src_id); } else if (!adev->irq.client[client_id].sources) { DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", @@ -448,6 +532,24 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, } /** + * amdgpu_irq_delegate - delegate IV to soft IH ring + * + * @adev: amdgpu device pointer + * @entry: IV entry + * @num_dw: size of IV + * + * Delegate the IV to the soft IH ring and schedule processing of it. Used + * if the hardware delegation to IH1 or IH2 doesn't work for some reason. 
+ */ +void amdgpu_irq_delegate(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry, + unsigned int num_dw) +{ + amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw); + schedule_work(&adev->irq.ih_soft_work); +} + +/** * amdgpu_irq_update - update hardware interrupt state * * @adev: amdgpu device pointer @@ -489,6 +591,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) { int i, j, k; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + amdgpu_restore_msix(adev); + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; @@ -496,7 +601,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; - if (!src) + if (!src || !src->funcs || !src->funcs->set) continue; for (k = 0; k < src->num_types; k++) amdgpu_irq_update(adev, src, k); @@ -519,7 +624,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -549,7 +654,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -580,7 +685,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev->irq.installed) return false; if (type >= src->num_types) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index c718e94a55c9..e9f2c11ea416 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -44,6 +44,7 @@ enum amdgpu_interrupt_state { }; struct amdgpu_iv_entry { + struct amdgpu_ih_ring *ih; unsigned client_id; unsigned src_id; unsigned ring_id; @@ -61,7 +62,6 @@ struct amdgpu_irq_src { unsigned num_types; atomic_t *enabled_types; const struct amdgpu_irq_src_funcs *funcs; - void *data; }; struct amdgpu_irq_client { @@ -80,6 +80,7 @@ struct amdgpu_irq_src_funcs { struct amdgpu_irq { bool installed; + unsigned int irq; spinlock_t lock; /* interrupt sources */ struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX]; @@ -88,9 +89,9 @@ struct amdgpu_irq { bool msi_enabled; /* msi enabled */ /* interrupt rings */ - struct amdgpu_ih_ring ih, ih1, ih2; + struct amdgpu_ih_ring ih, ih1, ih2, ih_soft; const struct amdgpu_ih_funcs *ih_funcs; - struct work_struct ih1_work, ih2_work; + struct work_struct ih1_work, ih2_work, ih_soft_work; struct amdgpu_irq_src self_irq; /* gen irq stuff */ @@ -100,15 +101,18 @@ struct amdgpu_irq { }; void amdgpu_irq_disable_all(struct amdgpu_device *adev); -irqreturn_t amdgpu_irq_handler(int irq, void *arg); int amdgpu_irq_init(struct amdgpu_device *adev); -void amdgpu_irq_fini(struct amdgpu_device *adev); +void amdgpu_irq_fini_sw(struct amdgpu_device *adev); +void amdgpu_irq_fini_hw(struct amdgpu_device *adev); int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned client_id, unsigned src_id, struct amdgpu_irq_src *source); void amdgpu_irq_dispatch(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); +void amdgpu_irq_delegate(struct amdgpu_device 
*adev, + struct amdgpu_iv_entry *entry, + unsigned int num_dw); int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4fb20e870e63..bfc47bea23db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -25,21 +25,34 @@ #include <linux/wait.h> #include <linux/sched.h> +#include <drm/drm_drv.h> + #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_timedout(struct drm_sched_job *s_job) +static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); struct amdgpu_task_info ti; + struct amdgpu_device *adev = ring->adev; + int idx; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) { + DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s", + __func__, s_job->sched->name); + + /* Effectively the job is aborted as the device is gone */ + return DRM_GPU_SCHED_STAT_ENODEV; + } memset(&ti, 0, sizeof(struct amdgpu_task_info)); - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { + if (amdgpu_gpu_recovery && + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); - return; + goto exit; } amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); @@ -49,10 +62,17 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", ti.process_name, ti.tgid, ti.task_name, ti.pid); - if (amdgpu_device_should_recover_gpu(ring->adev)) + if (amdgpu_device_should_recover_gpu(ring->adev)) { amdgpu_device_gpu_recover(ring->adev, job); - else + } else { drm_sched_suspend_timeout(&ring->sched); + if (amdgpu_sriov_vf(adev)) + adev->virt.tdr_debug = true; + } + +exit: + drm_dev_exit(idx); + return DRM_GPU_SCHED_STAT_NOMINAL; } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, @@ -87,7 +107,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - struct amdgpu_job **job) + enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job) { int r; @@ -95,7 +116,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, if (r) return r; - r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); + r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); if (r) kfree(*job); @@ -106,44 +127,52 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) { struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); struct dma_fence *f; + struct dma_fence *hw_fence; unsigned i; - /* use sched fence if available */ - f = job->base.s_fence ? &job->base.s_fence->finished : job->fence; + if (job->hw_fence.ops == NULL) + hw_fence = job->external_hw_fence; + else + hw_fence = &job->hw_fence; + /* use sched fence if available */ + f = job->base.s_fence ? 
&job->base.s_fence->finished : hw_fence; for (i = 0; i < job->num_ibs; ++i) amdgpu_ib_free(ring->adev, &job->ibs[i], f); } static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { - struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); drm_sched_job_cleanup(s_job); - amdgpu_ring_priority_put(ring, s_job->s_priority); - dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); - kfree(job); + + /* only put the hw fence if it has an embedded fence */ + if (job->hw_fence.ops != NULL) + dma_fence_put(&job->hw_fence); + else + kfree(job); } void amdgpu_job_free(struct amdgpu_job *job) { amdgpu_job_free_resources(job); - - dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); - kfree(job); + + /* only put the hw fence if it has an embedded fence */ + if (job->hw_fence.ops != NULL) + dma_fence_put(&job->hw_fence); + else + kfree(job); } int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { - enum drm_sched_priority priority; - struct amdgpu_ring *ring; int r; if (!f) @@ -153,14 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, if (r) return r; - job->owner = owner; + drm_sched_job_arm(&job->base); + *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); - priority = job->base.s_priority; - drm_sched_entity_push_job(&job->base, entity); - - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); + drm_sched_entity_push_job(&job->base); return 0; } @@ -172,11 +198,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, job->base.sched = &ring->sched; r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence); - job->fence = dma_fence_get(*fence); + /* record external_hw_fence for direct submit */ + job->external_hw_fence = dma_fence_get(*fence); if (r) return r; amdgpu_job_free(job); + dma_fence_put(*fence); + return 0; } @@ -187,17 +216,13 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; struct dma_fence *fence; - bool explicit = false; int r; - fence = amdgpu_sync_get_fence(&job->sync, &explicit); - if (fence && explicit) { - if (drm_sched_dependency_optimized(fence, s_entity)) { - r = amdgpu_sync_fence(ring->adev, &job->sched_sync, - fence, false); - if (r) - DRM_ERROR("Error adding fence (%d)\n", r); - } + fence = amdgpu_sync_get_fence(&job->sync); + if (fence && drm_sched_dependency_optimized(fence, s_entity)) { + r = amdgpu_sync_fence(&job->sched_sync, fence); + if (r) + DRM_ERROR("Error adding fence (%d)\n", r); } while (fence == NULL && vm && !job->vmid) { @@ -207,7 +232,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); - fence = amdgpu_sync_get_fence(&job->sync, NULL); + fence = amdgpu_sync_get_fence(&job->sync); } return fence; @@ -238,10 +263,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); } - /* if gpu reset, hw fence will be replaced here */ - dma_fence_put(job->fence); - job->fence = dma_fence_get(fence); + if (!job->job_run_counter) + dma_fence_get(fence); + else if (finished->error < 0) + dma_fence_put(&job->hw_fence); + job->job_run_counter++; amdgpu_job_free_resources(job); fence = r ? 
ERR_PTR(r) : fence; @@ -258,7 +285,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { struct drm_sched_rq *rq = &sched->sched_rq[i]; if (!rq) @@ -278,7 +305,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) } /* Signal all jobs already scheduled to HW */ - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry(s_job, &sched->pending_list, list) { struct drm_sched_fence *s_fence = s_job->s_fence; dma_fence_set_error(&s_fence->finished, -EHWPOISON); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index dc7ee9358dcd..9e65730193b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -38,6 +38,7 @@ #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) struct amdgpu_fence; +enum amdgpu_ib_pool_type; struct amdgpu_job { struct drm_sched_job base; @@ -45,11 +46,11 @@ struct amdgpu_job { struct amdgpu_sync sync; struct amdgpu_sync sched_sync; struct amdgpu_ib *ibs; - struct dma_fence *fence; /* the hw fence */ + struct dma_fence hw_fence; + struct dma_fence *external_hw_fence; uint32_t preamble_status; uint32_t preemption_status; uint32_t num_ibs; - void *owner; bool vm_needs_flush; uint64_t vm_pd_addr; unsigned vmid; @@ -63,13 +64,14 @@ struct amdgpu_job { uint64_t uf_addr; uint64_t uf_sequence; + /* job_run_counter >= 1 means a resubmit job */ + uint32_t job_run_counter; }; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job, struct amdgpu_vm *vm); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - struct amdgpu_job **job); - + enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c new file mode 100644 index 000000000000..9342aa23ebd2 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -0,0 +1,218 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15d.h" +#include "soc15_common.h" + +#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000) + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work); + +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) +{ + INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler); + mutex_init(&adev->jpeg.jpeg_pg_lock); + atomic_set(&adev->jpeg.total_submission_cnt, 0); + + return 0; +} + +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec); + } + + mutex_destroy(&adev->jpeg.jpeg_pg_lock); + + return 0; +} + +int amdgpu_jpeg_suspend(struct amdgpu_device *adev) +{ + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + return 0; +} + +int amdgpu_jpeg_resume(struct amdgpu_device *adev) +{ + return 0; +} + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, jpeg.idle_work.work); + unsigned int fences = 0; + unsigned int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec); + } + + if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_GATE); + else + schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + atomic_inc(&adev->jpeg.total_submission_cnt); + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + mutex_lock(&adev->jpeg.jpeg_pg_lock); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_UNGATE); + mutex_unlock(&adev->jpeg.jpeg_pg_lock); +} + +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring) +{ + atomic_dec(&ring->adev->jpeg.total_submission_cnt); + schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 3); + if (r) + return r; + + amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + +static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, + struct dma_fence **fence) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + struct dma_fence *f = NULL; + const unsigned ib_size_dw = 16; + int i, r; + + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); + if (r) + return r; + + ib = &job->ibs[0]; + + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); + ib->ptr[1] = 0xDEADBEEF; + 
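/* the rest of the IB is padded below with type-6 packets, which the JPEG engine treats as NOPs, so the ring always consumes a fully formed 16-dword IB */ +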
for (i = 2; i < 16; i += 2) { + ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ib->ptr[i+1] = 0; + } + ib->length_dw = 16; + + r = amdgpu_job_submit_direct(job, ring, &f); + if (r) + goto err; + + if (fence) + *fence = dma_fence_get(f); + dma_fence_put(f); + + return 0; + +err: + amdgpu_job_free(job); + return r; +} + +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + struct dma_fence *fence = NULL; + long r = 0; + + r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence); + if (r) + goto error; + + r = dma_fence_wait_timeout(fence, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto error; + } else if (r < 0) { + goto error; + } else { + r = 0; + } + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + dma_fence_put(fence); +error: + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h new file mode 100644 index 000000000000..55fbff2be761 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_JPEG_H__ +#define __AMDGPU_JPEG_H__ + +#define AMDGPU_MAX_JPEG_INSTANCES 2 + +#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) +#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) + +struct amdgpu_jpeg_reg{ + unsigned jpeg_pitch; +}; + +struct amdgpu_jpeg_inst { + struct amdgpu_ring ring_dec; + struct amdgpu_irq_src irq; + struct amdgpu_jpeg_reg external; +}; + +struct amdgpu_jpeg { + uint8_t num_jpeg_inst; + struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; + struct amdgpu_jpeg_reg internal; + unsigned harvest_config; + struct delayed_work idle_work; + enum amd_powergating_state cur_state; + struct mutex jpeg_pg_lock; + atomic_t total_submission_cnt; +}; + +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev); +int amdgpu_jpeg_suspend(struct amdgpu_device *adev); +int amdgpu_jpeg_resume(struct amdgpu_device *adev); + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring); + +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring); +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); + +#endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b6db28a570c2..dfe667ea8b05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -27,9 +27,8 @@ */ #include "amdgpu.h" -#include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> -#include "amdgpu_sched.h" +#include <drm/drm_drv.h> #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" @@ -78,7 +77,7 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) */ void amdgpu_driver_unload_kms(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (adev == NULL) return; @@ -86,23 +85,18 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) amdgpu_unregister_gpu_instance(adev); if (adev->rmmio == NULL) - goto done_free; - - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); + return; - if (amdgpu_device_is_px(dev)) { + if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); } - amdgpu_acpi_fini(adev); - - amdgpu_device_fini(adev); + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) + DRM_WARN("smart shift update failed\n"); -done_free: - kfree(adev); - dev->dev_private = NULL; + amdgpu_acpi_fini(adev); + amdgpu_device_fini_hw(adev); } void amdgpu_register_gpu_instance(struct amdgpu_device *adev) @@ -130,70 +124,149 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) mutex_unlock(&mgpu_info.mutex); } +static void amdgpu_get_audio_func(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (p) { + pm_runtime_get_sync(&p->dev); + + pm_runtime_mark_last_busy(&p->dev); + pm_runtime_put_autosuspend(&p->dev); + + pci_dev_put(p); + } +} + /** * amdgpu_driver_load_kms - Main load function for KMS. * - * @dev: drm dev pointer + * @adev: pointer to struct amdgpu_device * @flags: device flags * * This is the main load function for KMS (all asics). * Returns 0 on success, error on failure. 
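* On failure, the out: path below balances the runtime-pm reference taken during init and unwinds via amdgpu_driver_unload_kms().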
*/ -int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) +int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { - struct amdgpu_device *adev; + struct drm_device *dev; + struct pci_dev *parent; int r, acpi_status; - adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); - if (adev == NULL) { - return -ENOMEM; - } - dev->dev_private = (void *)adev; + dev = adev_to_drm(adev); - if ((amdgpu_runtime_pm != 0) && - amdgpu_has_atpx() && + if (amdgpu_has_atpx() && (amdgpu_is_atpx_hybrid() || amdgpu_has_atpx_dgpu_power_cntl()) && ((flags & AMD_IS_APU) == 0) && - !pci_is_thunderbolt_attached(dev->pdev)) + !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) flags |= AMD_IS_PX; + parent = pci_upstream_bridge(adev->pdev); + adev->has_pr3 = parent ? pci_pr3_present(parent) : false; + /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must * properly initialize the GPU MC controller and permit * VRAM allocation */ - r = amdgpu_device_init(adev, dev, dev->pdev, flags); + r = amdgpu_device_init(adev, flags); if (r) { - dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); + dev_err(dev->dev, "Fatal error during GPU init\n"); goto out; } + if (amdgpu_device_supports_px(dev) && + (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */ + adev->runpm = true; + dev_info(adev->dev, "Using ATPX for runtime pm\n"); + } else if (amdgpu_device_supports_boco(dev) && + (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */ + adev->runpm = true; + dev_info(adev->dev, "Using BOCO for runtime pm\n"); + } else if (amdgpu_device_supports_baco(dev) && + (amdgpu_runtime_pm != 0)) { + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + /* enable runpm if runpm=1 */ + if (amdgpu_runtime_pm > 0) + adev->runpm = true; + break; + case CHIP_VEGA10: + /* turn runpm on if noretry=0 */ + if (!adev->gmc.noretry) + adev->runpm = true; + break; + default: + /* enable runpm on CI+ */ + adev->runpm = true; + break; + } + if (adev->runpm) + dev_info(adev->dev, "Using BACO for runtime pm\n"); + } + /* Call ACPI methods: require modeset init * but failure is not fatal */ - if (!r) { - acpi_status = amdgpu_acpi_init(adev); - if (acpi_status) - dev_dbg(&dev->pdev->dev, - "Error during ACPI methods call\n"); - } - if (amdgpu_device_is_px(dev)) { - dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); + acpi_status = amdgpu_acpi_init(adev); + if (acpi_status) + dev_dbg(dev->dev, "Error during ACPI methods call\n"); + + if (adev->runpm) { + /* only need to skip on ATPX */ + if (amdgpu_device_supports_px(dev)) + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* we want direct complete for BOCO */ + if (amdgpu_device_supports_boco(dev)) + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_MAY_SKIP_RESUME); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); - pm_runtime_set_active(dev->dev); + pm_runtime_allow(dev->dev); + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); + + /* + * For runpm implemented via BACO, PMFW will handle the + * timing for BACO in and out: + * - put ASIC into BACO state only when both video and + * audio functions are in D3 state. + * - pull ASIC out of BACO state when either video or + * audio function is in D0 state. + * Also, at startup, PMFW assumes both functions are in + * D0 state. 
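+ * + * (BACO is short for Bus Active, Chip Off: the ASIC is powered down while its PCIe presence stays alive, with PMFW sequencing entry and exit as described above.)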
+ * + * So if snd driver was loaded prior to amdgpu driver + * and audio function was put into D3 state, there will + * be no PMFW-aware D-state transition (D0->D3) on runpm + * suspend. Thus BACO will not be kicked in correctly. + * + * Via amdgpu_get_audio_func(), the audio dev is put + * into D0 state. Then there will be a PMFW-aware D-state + * transition (D0->D3) on runpm suspend. + */ + if (amdgpu_device_supports_baco(dev) && + !(adev->flags & AMD_IS_APU) && + (adev->asic_type >= CHIP_NAVI10)) + amdgpu_get_audio_func(adev); } + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) + DRM_WARN("smart shift update failed\n"); + out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ - if (adev->rmmio && amdgpu_device_is_px(dev)) + if (adev->rmmio && adev->runpm) pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); } @@ -265,14 +338,40 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = 0; break; case AMDGPU_INFO_FW_TA: - if (query_fw->index > 1) + switch (query_fw->index) { + case TA_FW_TYPE_PSP_XGMI: + fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.xgmi_context.context + .bin_desc.feature_version; + break; + case TA_FW_TYPE_PSP_RAS: + fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.ras_context.context + .bin_desc.feature_version; + break; + case TA_FW_TYPE_PSP_HDCP: + fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.hdcp_context.context + .bin_desc.feature_version; + break; + case TA_FW_TYPE_PSP_DTM: + fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.dtm_context.context + .bin_desc.feature_version; + break; + case TA_FW_TYPE_PSP_RAP: + fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.rap_context.context + .bin_desc.feature_version; + break; + case TA_FW_TYPE_PSP_SECUREDISPLAY: + fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version; + fw_info->feature = + adev->psp.securedisplay_context.context.bin_desc + .feature_version; + break; + default: return -EINVAL; - if (query_fw->index == 0) { - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.ta_xgmi_ucode_version; - } else { - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.ta_ras_ucode_version; } break; case AMDGPU_INFO_FW_SDMA: @@ -282,17 +381,25 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = adev->sdma.instance[query_fw->index].feature_version; break; case AMDGPU_INFO_FW_SOS: - fw_info->ver = adev->psp.sos_fw_version; - fw_info->feature = adev->psp.sos_feature_version; + fw_info->ver = adev->psp.sos.fw_version; + fw_info->feature = adev->psp.sos.feature_version; break; case AMDGPU_INFO_FW_ASD: - fw_info->ver = adev->psp.asd_fw_version; - fw_info->feature = adev->psp.asd_feature_version; + fw_info->ver = adev->psp.asd_context.bin_desc.fw_version; + fw_info->feature = adev->psp.asd_context.bin_desc.feature_version; break; case AMDGPU_INFO_FW_DMCU: fw_info->ver = adev->dm.dmcu_fw_version; fw_info->feature = 0; break; + case AMDGPU_INFO_FW_DMCUB: + fw_info->ver = adev->dm.dmcub_fw_version; + fw_info->feature = 0; + break; + case AMDGPU_INFO_FW_TOC: + fw_info->ver = adev->psp.toc.fw_version; + fw_info->feature = adev->psp.toc.feature_version; + break; default: return -EINVAL; } @@ -396,12 +503,14 @@ static 
int amdgpu_hw_ip_info(struct amdgpu_device *adev, ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: - type = AMD_IP_BLOCK_TYPE_VCN; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->uvd.harvest_config & (1 << i)) + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + if (adev->jpeg.harvest_config & (1 << i)) continue; - if (adev->vcn.inst[i].ring_jpeg.sched.ready) + if (adev->jpeg.inst[i].ring_dec.sched.ready) ++num_rings; } ib_start_alignment = 16; @@ -437,7 +546,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, /** * amdgpu_info_ioctl - answer a device specific request. * - * @adev: amdgpu device pointer + * @dev: drm device pointer * @data: request object * @filp: drm filp * @@ -446,9 +555,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, * etc. (all asics). * Returns 0 on success, -EINVAL on failure. */ -static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_info *info = data; struct amdgpu_mode_info *minfo = &adev->mode_info; void __user *out = (void __user *)(uintptr_t)info->return_pointer; @@ -517,9 +626,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_VCN_DEC: case AMDGPU_HW_IP_VCN_ENC: - case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; break; + case AMDGPU_HW_IP_VCN_JPEG: + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + break; default: return -EINVAL; } @@ -560,13 +672,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); + ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -589,7 +701,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file min(adev->gmc.visible_vram_size - atomic64_read(&adev->visible_pin_size), vram_gtt.vram_size); - vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; + vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size; vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); return copy_to_user(out, &vram_gtt, @@ -597,14 +709,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - + struct ttm_resource_manager *vram_man = + ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *gtt_man = + ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; mem.vram.usable_heap_size = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = - amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + amdgpu_vram_mgr_usage(vram_man); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -614,16 +729,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file atomic64_read(&adev->visible_pin_size), mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = - amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + amdgpu_vram_mgr_vis_usage(vram_man); mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; - mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; + mem.gtt.total_heap_size = gtt_man->size; mem.gtt.total_heap_size *= PAGE_SIZE; mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); mem.gtt.heap_usage = - amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); + amdgpu_gtt_mgr_usage(gtt_man); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, @@ -644,8 +759,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; @@ -673,39 +792,42 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device *dev_info; uint64_t vm_size; + int ret; - dev_info.device_id = dev->pdev->device; - dev_info.chip_rev = adev->rev_id; - dev_info.external_rev = adev->external_rev_id; - dev_info.pci_rev = dev->pdev->revision; - dev_info.family = adev->family; - dev_info.num_shader_engines = adev->gfx.config.max_shader_engines; - dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + dev_info->device_id = adev->pdev->device; + dev_info->chip_rev = adev->rev_id; + dev_info->external_rev = adev->external_rev_id; + dev_info->pci_rev = adev->pdev->revision; + dev_info->family = adev->family; + dev_info->num_shader_engines = adev->gfx.config.max_shader_engines; + dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; /* return all clocks in KHz */ - dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; + dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; if (adev->pm.dpm_enabled) { - dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; - dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; - } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) { - dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; - dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; + dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; + dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; } else { - dev_info.max_engine_clock = adev->clock.default_sclk * 10; - dev_info.max_memory_clock = adev->clock.default_mclk * 10; + dev_info->max_engine_clock = adev->clock.default_sclk * 10; + dev_info->max_memory_clock = adev->clock.default_mclk * 10; } - dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; - dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * + dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; + dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se * adev->gfx.config.max_shader_engines; - dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; - dev_info._pad = 0; - dev_info.ids_flags = 0; + dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; + dev_info->_pad = 0; + dev_info->ids_flags = 0; if (adev->flags & AMD_IS_APU) - dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) - dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; + if (amdgpu_is_tmz(adev)) + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size -= AMDGPU_VA_RESERVED_SIZE; @@ -715,45 +837,47 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file adev->vce.fw_version < AMDGPU_VCE_FW_53_45) vm_size = min(vm_size, 1ULL << 40); - dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; - dev_info.virtual_address_max = + dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; + dev_info->virtual_address_max = min(vm_size, AMDGPU_GMC_HOLE_START); if (vm_size > AMDGPU_GMC_HOLE_START) { - dev_info.high_va_offset = AMDGPU_GMC_HOLE_END; - dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size; + dev_info->high_va_offset = AMDGPU_GMC_HOLE_END; + dev_info->high_va_max = AMDGPU_GMC_HOLE_END 
| vm_size; } - dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; - dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; - dev_info.cu_active_number = adev->gfx.cu_info.number; - dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; - dev_info.ce_ram_size = adev->gfx.ce_ram_size; - memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], + dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; + dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info->cu_active_number = adev->gfx.cu_info.number; + dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; + dev_info->ce_ram_size = adev->gfx.ce_ram_size; + memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], sizeof(adev->gfx.cu_info.ao_cu_bitmap)); - memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], + memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], sizeof(adev->gfx.cu_info.bitmap)); - dev_info.vram_type = adev->gmc.vram_type; - dev_info.vram_bit_width = adev->gmc.vram_width; - dev_info.vce_harvest_config = adev->vce.harvest_config; - dev_info.gc_double_offchip_lds_buf = + dev_info->vram_type = adev->gmc.vram_type; + dev_info->vram_bit_width = adev->gmc.vram_width; + dev_info->vce_harvest_config = adev->vce.harvest_config; + dev_info->gc_double_offchip_lds_buf = adev->gfx.config.double_offchip_lds_buf; - dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; - dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; - dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; - dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches; - dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth; - dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth; - dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads; + dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size; + dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs; + dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh; + dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches; + dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth; + dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth; + dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads; if (adev->family >= AMDGPU_FAMILY_NV) - dev_info.pa_sc_tile_steering_override = + dev_info->pa_sc_tile_steering_override = adev->gfx.config.pa_sc_tile_steering_override; - dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask; + dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask; - return copy_to_user(out, &dev_info, - min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; + ret = copy_to_user(out, dev_info, + min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0; + kfree(dev_info); + return ret; } case AMDGPU_INFO_VCE_CLOCK_TABLE: { unsigned i; @@ -793,6 +917,21 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file min((size_t)size, (size_t)(bios_size - bios_offset))) ? 
-EFAULT : 0; } + case AMDGPU_INFO_VBIOS_INFO: { + struct drm_amdgpu_info_vbios vbios_info = {}; + struct atom_context *atom_context; + + atom_context = adev->mode_info.atom_context; + memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name)); + memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn)); + vbios_info.version = atom_context->version; + memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str, + sizeof(atom_context->vbios_ver_str)); + memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date)); + + return copy_to_user(out, &vbios_info, + min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->vbios_info.type); @@ -918,12 +1057,69 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (!ras) return -EINVAL; - ras_mask = (uint64_t)ras->supported << 32 | ras->features; + ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features; return copy_to_user(out, &ras_mask, min_t(u64, size, sizeof(ras_mask))) ? -EFAULT : 0; } + case AMDGPU_INFO_VIDEO_CAPS: { + const struct amdgpu_video_codecs *codecs; + struct drm_amdgpu_info_video_caps *caps; + int r; + + switch (info->video_cap.type) { + case AMDGPU_INFO_VIDEO_CAPS_DECODE: + r = amdgpu_asic_query_video_codecs(adev, false, &codecs); + if (r) + return -EINVAL; + break; + case AMDGPU_INFO_VIDEO_CAPS_ENCODE: + r = amdgpu_asic_query_video_codecs(adev, true, &codecs); + if (r) + return -EINVAL; + break; + default: + DRM_DEBUG_KMS("Invalid request %d\n", + info->video_cap.type); + return -EINVAL; + } + + caps = kzalloc(sizeof(*caps), GFP_KERNEL); + if (!caps) + return -ENOMEM; + + for (i = 0; i < codecs->codec_count; i++) { + int idx = codecs->codec_array[i].codec_type; + + switch (idx) { + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9: + case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1: + caps->codec_info[idx].valid = 1; + caps->codec_info[idx].max_width = + codecs->codec_array[i].max_width; + caps->codec_info[idx].max_height = + codecs->codec_array[i].max_height; + caps->codec_info[idx].max_pixels_per_frame = + codecs->codec_array[i].max_pixels_per_frame; + caps->codec_info[idx].max_level = + codecs->codec_array[i].max_level; + break; + default: + break; + } + } + r = copy_to_user(out, caps, + min((size_t)size, sizeof(*caps))) ? 
-EFAULT : 0; + kfree(caps); + return r; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; @@ -959,7 +1155,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev) */ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv; int r, pasid; @@ -976,7 +1172,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = pm_runtime_get_sync(dev->dev); if (r < 0) - return r; + goto pm_put; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -989,10 +1185,15 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) dev_warn(adev->dev, "No more PASIDs available!"); pasid = 0; } - r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid); + + r = amdgpu_vm_init(adev, &fpriv->vm); if (r) goto error_pasid; + r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid); + if (r) + goto error_vm; + fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); if (!fpriv->prt_va) { r = -ENOMEM; @@ -1020,13 +1221,16 @@ error_vm: amdgpu_vm_fini(adev, &fpriv->vm); error_pasid: - if (pasid) + if (pasid) { amdgpu_pasid_free(pasid); + amdgpu_vm_set_pasid(adev, &fpriv->vm, 0); + } kfree(fpriv); out_suspend: pm_runtime_mark_last_busy(dev->dev); +pm_put: pm_runtime_put_autosuspend(dev->dev); return r; @@ -1043,11 +1247,11 @@ out_suspend: void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_bo_list *list; struct amdgpu_bo *pd; - unsigned int pasid; + u32 pasid; int handle; if (!fpriv) @@ -1071,7 +1275,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, } pasid = fpriv->vm.pasid; - pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); + pd = amdgpu_bo_ref(fpriv->vm.root.bo); amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); amdgpu_vm_fini(adev, &fpriv->vm); @@ -1093,21 +1297,31 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pm_runtime_put_autosuspend(dev->dev); } + +void amdgpu_driver_release_kms(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + + amdgpu_device_fini_sw(adev); + pci_set_drvdata(adev->pdev, NULL); +} + /* * VBlank related functions. */ /** * amdgpu_get_vblank_counter_kms - get frame count * - * @dev: drm dev pointer - * @pipe: crtc to get the frame count from + * @crtc: crtc to get the frame count from * * Gets the frame count on the requested crtc (all asics). * Returns frame count on success, -EINVAL on failure. */ -u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) +u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc) { - struct amdgpu_device *adev = dev->dev_private; + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + struct amdgpu_device *adev = drm_to_adev(dev); int vpos, hpos, stat; u32 count; @@ -1166,15 +1380,16 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) /** * amdgpu_enable_vblank_kms - enable vblank interrupt * - * @dev: drm dev pointer - * @pipe: crtc to enable vblank interrupt for + * @crtc: crtc to enable vblank interrupt for * * Enable the interrupt on the requested crtc (all asics). * Returns 0 on success, -EINVAL on failure. 
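* The crtc index is mapped to an interrupt type and the request is forwarded to amdgpu_irq_get().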
*/ -int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) +int amdgpu_enable_vblank_kms(struct drm_crtc *crtc) { - struct amdgpu_device *adev = dev->dev_private; + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + struct amdgpu_device *adev = drm_to_adev(dev); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); return amdgpu_irq_get(adev, &adev->crtc_irq, idx); @@ -1183,55 +1398,44 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) /** * amdgpu_disable_vblank_kms - disable vblank interrupt * - * @dev: drm dev pointer - * @pipe: crtc to disable vblank interrupt for + * @crtc: crtc to disable vblank interrupt for * * Disable the interrupt on the requested crtc (all asics). */ -void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) +void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) { - struct amdgpu_device *adev = dev->dev_private; + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + struct amdgpu_device *adev = drm_to_adev(dev); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); amdgpu_irq_put(adev, &adev->crtc_irq, idx); } -const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), - DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - /* KMS */ - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW) -}; -const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); - /* * Debugfs info */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) +static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; int ret, i; + static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { +#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type + TA_FW_NAME(XGMI), + TA_FW_NAME(RAS), + TA_FW_NAME(HDCP), + TA_FW_NAME(DTM), + TA_FW_NAME(RAP), + TA_FW_NAME(SECUREDISPLAY), +#undef TA_FW_NAME + }; + /* VCE */ 
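/* (the VCE query below, like each firmware block after it, follows the same pattern: set query_fw.fw_type, call amdgpu_firmware_info() and print the feature and firmware versions) */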
query_fw.fw_type = AMDGPU_INFO_FW_VCE; ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); @@ -1322,8 +1526,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) fw_info.feature, fw_info.ver); /* MEC2 */ - if (adev->asic_type == CHIP_KAVERI || - (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) { + if (adev->gfx.mec2_fw) { query_fw.index = 1; ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) @@ -1350,13 +1553,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) fw_info.feature, fw_info.ver); query_fw.fw_type = AMDGPU_INFO_FW_TA; - for (i = 0; i < 2; i++) { + for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) { query_fw.index = i; ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) continue; - seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n", - i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver); + + seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n", + ta_fw_name[i], fw_info.feature, fw_info.ver); } /* SMC */ @@ -1394,23 +1598,39 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* DMCUB */ + query_fw.fw_type = AMDGPU_INFO_FW_DMCUB; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* TOC */ + query_fw.fw_type = AMDGPU_INFO_FW_TOC; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); return 0; } -static const struct drm_info_list amdgpu_firmware_info_list[] = { - {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL}, -}; +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info); + #endif -int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) +void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list, - ARRAY_SIZE(amdgpu_firmware_info_list)); -#else - return 0; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_firmware_info", 0444, root, + adev, &amdgpu_debugfs_firmware_info_fops); + #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c new file mode 100644 index 000000000000..ce538f4819f9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -0,0 +1,117 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu_ras.h" +#include "amdgpu.h" +#include "amdgpu_mca.h" + +#include "umc/umc_6_7_0_offset.h" +#include "umc/umc_6_7_0_sh_mask.h" + +void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + unsigned long *error_count) +{ + uint64_t mc_status = RREG64_PCIE(mc_status_addr); + + if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + unsigned long *error_count) +{ + uint64_t mc_status = RREG64_PCIE(mc_status_addr); + + if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr) +{ + WREG64_PCIE(mc_status_addr, 0x0ULL); +} + +void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count)); + amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count)); + + amdgpu_mca_reset_error_count(adev, mc_status_addr); +} + +int amdgpu_mca_ras_late_init(struct amdgpu_device *adev, + struct amdgpu_mca_ras *mca_dev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = mca_dev->ras_funcs->sysfs_name, + }; + + if (!mca_dev->ras_if) { + mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!mca_dev->ras_if) + return -ENOMEM; + mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block; + mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block; + mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + } + ih_info.head = fs_info.head = *mca_dev->ras_if; + r = amdgpu_ras_late_init(adev, mca_dev->ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) { + kfree(mca_dev->ras_if); + mca_dev->ras_if = NULL; + } + + return r; +} + +void amdgpu_mca_ras_fini(struct amdgpu_device *adev, + struct amdgpu_mca_ras *mca_dev) +{ + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + if (!mca_dev->ras_if) + return; + + amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info); + kfree(mca_dev->ras_if); + mca_dev->ras_if = NULL; +}
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h new file mode 100644 index 000000000000..c74bc7177066 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __AMDGPU_MCA_H__ +#define __AMDGPU_MCA_H__ + +struct amdgpu_mca_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*query_ras_error_address)(struct amdgpu_device *adev, + void *ras_error_status); + uint32_t ras_block; + uint32_t ras_sub_block; + const char* sysfs_name; +}; + +struct amdgpu_mca_ras { + struct ras_common_if *ras_if; + const struct amdgpu_mca_ras_funcs *ras_funcs; +}; + +struct amdgpu_mca_funcs { + void (*init)(struct amdgpu_device *adev); +}; + +struct amdgpu_mca { + const struct amdgpu_mca_funcs *funcs; + struct amdgpu_mca_ras mp0; + struct amdgpu_mca_ras mp1; + struct amdgpu_mca_ras mpio; +}; + +void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + unsigned long *error_count); + +void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + unsigned long *error_count); + +void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr); + +void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, + uint64_t mc_status_addr, + void *ras_error_status); + +int amdgpu_mca_ras_late_init(struct amdgpu_device *adev, + struct amdgpu_mca_ras *mca_dev); + +void amdgpu_mca_ras_fini(struct amdgpu_device *adev, + struct amdgpu_mca_ras *mca_dev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 78fe49033543..7334982ea702 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -24,10 +24,32 @@ #ifndef __AMDGPU_MES_H__ #define __AMDGPU_MES_H__ +#define AMDGPU_MES_MAX_COMPUTE_PIPES 8 +#define AMDGPU_MES_MAX_GFX_PIPES 2 +#define AMDGPU_MES_MAX_SDMA_PIPES 2 + +enum amdgpu_mes_priority_level { + AMDGPU_MES_PRIORITY_LEVEL_LOW = 0, + AMDGPU_MES_PRIORITY_LEVEL_NORMAL = 1, + AMDGPU_MES_PRIORITY_LEVEL_MEDIUM = 2, + AMDGPU_MES_PRIORITY_LEVEL_HIGH = 3, + AMDGPU_MES_PRIORITY_LEVEL_REALTIME = 4, + AMDGPU_MES_PRIORITY_NUM_LEVELS +}; + struct 
amdgpu_mes_funcs; struct amdgpu_mes { - struct amdgpu_adev *adev; + struct amdgpu_device *adev; + + uint32_t total_max_queue; + uint32_t doorbell_id_offset; + uint32_t max_doorbell_slices; + + uint64_t default_process_quantum; + uint64_t default_gang_quantum; + + struct amdgpu_ring ring; const struct firmware *fw; @@ -45,8 +67,27 @@ struct amdgpu_mes { uint32_t data_fw_version; uint64_t data_start_addr; + /* eop gpu obj */ + struct amdgpu_bo *eop_gpu_obj; + uint64_t eop_gpu_addr; + + void *mqd_backup; + + uint32_t vmid_mask_gfxhub; + uint32_t vmid_mask_mmhub; + uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES]; + uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES]; + uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES]; + uint32_t agreegated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS]; + uint32_t sch_ctx_offs; + uint64_t sch_ctx_gpu_addr; + uint64_t *sch_ctx_ptr; + uint32_t query_status_fence_offs; + uint64_t query_status_fence_gpu_addr; + uint64_t *query_status_fence_ptr; + /* ip specific functions */ - struct amdgpu_mes_funcs *funcs; + const struct amdgpu_mes_funcs *funcs; }; struct mes_add_queue_input { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 676c48c02d77..24297dc51434 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", }; if (!adev->mmhub.ras_if) { @@ -42,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB; adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->mmhub.ras_if->sub_block_index = 0; - strcpy(adev->mmhub.ras_if->name, "mmhub"); } ih_info.head = fs_info.head = *adev->mmhub.ras_if; r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1cd78940cf82..b27fcbccce2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -21,16 +21,36 @@ #ifndef __AMDGPU_MMHUB_H__ #define __AMDGPU_MMHUB_H__ -struct amdgpu_mmhub_funcs { - void (*ras_init)(struct amdgpu_device *adev); +struct amdgpu_mmhub_ras_funcs { int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); + void *ras_error_status); + void (*query_ras_error_status)(struct amdgpu_device *adev); + void (*reset_ras_error_count)(struct amdgpu_device *adev); + void (*reset_ras_error_status)(struct amdgpu_device *adev); +}; + +struct amdgpu_mmhub_funcs { + u64 (*get_fb_location)(struct amdgpu_device *adev); + void (*init)(struct amdgpu_device *adev); + int (*gart_enable)(struct amdgpu_device *adev); + void (*set_fault_enable_default)(struct amdgpu_device *adev, + bool value); + void (*gart_disable)(struct amdgpu_device *adev); + int (*set_clockgating)(struct amdgpu_device *adev, + enum amd_clockgating_state state); + void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags); + void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); + void (*update_power_gating)(struct amdgpu_device *adev, + bool enable); }; struct amdgpu_mmhub { struct ras_common_if *ras_if; const struct amdgpu_mmhub_funcs *funcs; + const struct amdgpu_mmhub_ras_funcs *ras_funcs; 
}; int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 828b5167ff12..4b153daf283d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); @@ -155,3 +155,90 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) mmu_interval_notifier_remove(&bo->notifier); bo->notifier.mm = NULL; } + +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked, void *owner) +{ + struct hmm_range *hmm_range; + unsigned long timeout; + unsigned long i; + unsigned long *pfns; + int r = 0; + + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); + if (unlikely(!hmm_range)) + return -ENOMEM; + + pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); + if (unlikely(!pfns)) { + r = -ENOMEM; + goto out_free_range; + } + + hmm_range->notifier = notifier; + hmm_range->default_flags = HMM_PFN_REQ_FAULT; + if (!readonly) + hmm_range->default_flags |= HMM_PFN_REQ_WRITE; + hmm_range->hmm_pfns = pfns; + hmm_range->start = start; + hmm_range->end = start + npages * PAGE_SIZE; + hmm_range->dev_private_owner = owner; + + /* Assuming 512MB takes a maximum of 1 second to fault page addresses */ + timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT; + timeout = jiffies + msecs_to_jiffies(timeout); + +retry: + hmm_range->notifier_seq = mmu_interval_read_begin(notifier); + + if (likely(!mmap_locked)) + mmap_read_lock(mm); + + r = hmm_range_fault(hmm_range); + + if (likely(!mmap_locked)) + mmap_read_unlock(mm); + if (unlikely(r)) { + /* + * FIXME: This timeout should encompass the retry from + * mmu_interval_read_retry() as well. + */ + if (r == -EBUSY && !time_after(jiffies, timeout)) + goto retry; + goto out_free_pfns; + } + + /* + * Due to default_flags, all pages are HMM_PFN_VALID or + * hmm_range_fault() fails. FIXME: The pages cannot be touched outside + * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */ + for (i = 0; pages && i < npages; i++) + pages[i] = hmm_pfn_to_page(pfns[i]); + + *phmm_range = hmm_range; + + return 0; + +out_free_pfns: + kvfree(pfns); +out_free_range: + kfree(hmm_range); + + return r; +} + +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +{ + int r; + + r = mmu_interval_read_retry(hmm_range->notifier, + hmm_range->notifier_seq); + kvfree(hmm_range->hmm_pfns); + kfree(hmm_range); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h index a292238f75eb..14a3c1864085 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -30,6 +30,13 @@ #include <linux/workqueue.h> #include <linux/interval_tree.h> +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked, void *owner); +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + #if defined(CONFIG_HMM_MIRROR) int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index eb9975f4decb..89fb372ed49c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -46,6 +46,7 @@ #include <drm/drm_dp_mst_helper.h> #include "modules/inc/mod_freesync.h" +#include "amdgpu_dm_irq_params.h" struct amdgpu_bo; struct amdgpu_device; @@ -301,6 +302,9 @@ struct amdgpu_display_funcs { struct amdgpu_framebuffer { struct drm_framebuffer base; + uint64_t tiling_flags; + bool tmz_surface; + /* caching for later use */ uint64_t address; }; @@ -340,7 +344,7 @@ struct amdgpu_mode_info { /* pointer to fbdev info structure */ struct amdgpu_fbdev *rfbdev; /* firmware flags */ - u16 firmware_flags; + u32 firmware_flags; /* pointer to backlight encoder */ struct amdgpu_encoder *bl_encoder; u8 bl_level; /* saved backlight level */ @@ -404,7 +408,8 @@ struct amdgpu_crtc { struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; - u32 last_flip_vblank; + /* parameters access from DM IRQ handler */ + struct dm_irq_params dm_irq_params; /* pll sharing */ struct amdgpu_atom_ss ss; bool ss_enabled; @@ -469,6 +474,7 @@ struct amdgpu_encoder { struct amdgpu_connector_atom_dig { /* displayport */ u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; u8 dp_sink_type; int dp_clock; int dp_lane_count; @@ -596,6 +602,14 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); +int amdgpu_display_gem_fb_init(struct drm_device *dev, + struct amdgpu_framebuffer *rfb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); +int amdgpu_display_gem_fb_verify_and_init( + struct drm_device *dev, struct amdgpu_framebuffer *rfb, + struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); int amdgpu_display_framebuffer_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, const struct drm_mode_fb_cmd2 *mode_cmd, @@ -612,6 +626,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode); int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); +bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, 
+ bool in_vblank_irq, int *vpos, + int *hpos, ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + /* fbdev layer */ int amdgpu_fbdev_init(struct amdgpu_device *adev); void amdgpu_fbdev_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 7d5c3a9de9ea..6afb02fef8cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "pcie_bif_err_count", - .debugfs_name = "pcie_bif_err_inject", }; if (!adev->nbio.ras_if) { @@ -40,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF; adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->nbio.ras_if->sub_block_index = 0; - strcpy(adev->nbio.ras_if->name, "pcie_bif"); } ih_info.head = fs_info.head = *adev->nbio.ras_if; r = amdgpu_ras_late_init(adev, adev->nbio.ras_if, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 919bd566ba3c..843052205bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -47,15 +47,27 @@ struct nbio_hdp_flush_reg { u32 ref_and_mask_sdma7; }; +struct amdgpu_nbio_ras_funcs { + void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); + void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); + int (*init_ras_controller_interrupt)(struct amdgpu_device *adev); + int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); +}; + struct amdgpu_nbio_funcs { const struct nbio_hdp_flush_reg *hdp_flush_reg; u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_port_index_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev); u32 (*get_rev_id)(struct amdgpu_device *adev); void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); u32 (*get_memsize)(struct amdgpu_device *adev); void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size); @@ -77,15 +89,13 @@ struct amdgpu_nbio_funcs { u32 *flags); void (*ih_control)(struct amdgpu_device *adev); void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); void (*remap_hdp_registers)(struct amdgpu_device *adev); - void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); - void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); - int (*init_ras_controller_interrupt)(struct amdgpu_device *adev); - int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - int (*ras_late_init)(struct amdgpu_device *adev); + void (*enable_aspm)(struct amdgpu_device *adev, + bool enable); + void (*program_aspm)(struct amdgpu_device *adev); + void 
(*apply_lc_spc_mode_wa)(struct amdgpu_device *adev); + void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev); + void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); }; struct amdgpu_nbio { @@ -94,6 +104,7 @@ struct amdgpu_nbio { struct amdgpu_irq_src ras_err_event_athub_irq; struct ras_common_if *ras_if; const struct amdgpu_nbio_funcs *funcs; + const struct amdgpu_nbio_ras_funcs *ras_funcs; }; int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e3f16b49e970..aeb92e5677ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -31,6 +31,7 @@ */ #include <linux/list.h> #include <linux/slab.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> #include <drm/drm_cache.h> @@ -51,50 +52,44 @@ * */ -/** - * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting - * - * @bo: &amdgpu_bo buffer object - * - * This function is called when a BO stops being pinned, and updates the - * &amdgpu_device pin_size values accordingly. - */ -static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); - atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), - &adev->visible_pin_size); - } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { - atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); - } -} - static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) { - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); - if (bo->pin_count > 0) - amdgpu_bo_subtract_pin_size(bo); - amdgpu_bo_kunmap(bo); if (bo->tbo.base.import_attach) drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); drm_gem_object_release(&bo->tbo.base); + amdgpu_bo_unref(&bo->parent); + kvfree(bo); +} + +static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) +{ + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo_user *ubo; + + ubo = to_amdgpu_bo_user(bo); + kfree(ubo->metadata); + amdgpu_bo_destroy(tbo); +} + +static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo_vm *vmbo; + + vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram got NULL of bo->parent */ - if (!list_empty(&bo->shadow_list)) { + if (!list_empty(&vmbo->shadow_list)) { mutex_lock(&adev->shadow_list_lock); - list_del_init(&bo->shadow_list); + list_del_init(&vmbo->shadow_list); mutex_unlock(&adev->shadow_list_lock); } - amdgpu_bo_unref(&bo->parent); - kfree(bo->metadata); - kfree(bo); + amdgpu_bo_destroy(tbo); } /** @@ -109,8 +104,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) */ bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) { - if (bo->destroy == &amdgpu_bo_destroy) + if (bo->destroy == &amdgpu_bo_destroy || + bo->destroy == &amdgpu_bo_user_destroy || + bo->destroy == &amdgpu_bo_vm_destroy) return true; + return false; } @@ -135,8 +133,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + places[c].mem_type = TTM_PL_VRAM; + places[c].flags = 0; if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; 
@@ -151,56 +149,54 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_TT; - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) - places[c].flags |= TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - else - places[c].flags |= TTM_PL_FLAG_CACHED; + places[c].mem_type = + abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ? + AMDGPU_PL_PREEMPT : TTM_PL_TT; + places[c].flags = 0; c++; } if (domain & AMDGPU_GEM_DOMAIN_CPU) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_SYSTEM; - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) - places[c].flags |= TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - else - places[c].flags |= TTM_PL_FLAG_CACHED; + places[c].mem_type = TTM_PL_SYSTEM; + places[c].flags = 0; c++; } if (domain & AMDGPU_GEM_DOMAIN_GDS) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; + places[c].mem_type = AMDGPU_PL_GDS; + places[c].flags = 0; c++; } if (domain & AMDGPU_GEM_DOMAIN_GWS) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; + places[c].mem_type = AMDGPU_PL_GWS; + places[c].flags = 0; c++; } if (domain & AMDGPU_GEM_DOMAIN_OA) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; + places[c].mem_type = AMDGPU_PL_OA; + places[c].flags = 0; c++; } if (!c) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + places[c].mem_type = TTM_PL_SYSTEM; + places[c].flags = 0; c++; } - BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS); + BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS); placement->num_placement = c; placement->placement = places; @@ -251,6 +247,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); if (!*bo_ptr) { r = amdgpu_bo_create(adev, &bp, bo_ptr); @@ -373,6 +370,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (r) return r; + if ((*bo_ptr) == NULL) + return 0; + /* * Remove the original mem node and create a new one at the request * position. 
@@ -380,14 +380,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (cpu_addr) amdgpu_bo_kunmap(*bo_ptr); - ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource); for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; } r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, - &(*bo_ptr)->tbo.mem, &ctx); + &(*bo_ptr)->tbo.resource, &ctx); if (r) goto error; @@ -441,14 +441,14 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, unsigned long size, u32 domain) { - struct ttm_mem_type_manager *man = NULL; + struct ttm_resource_manager *man = NULL; /* * If GTT is part of requested domains the check must succeed to * allow fall back to GTT */ if (domain & AMDGPU_GEM_DOMAIN_GTT) { - man = &adev->mman.bdev.man[TTM_PL_TT]; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); if (size < (man->size << PAGE_SHIFT)) return true; @@ -457,7 +457,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, } if (domain & AMDGPU_GEM_DOMAIN_VRAM) { - man = &adev->mman.bdev.man[TTM_PL_VRAM]; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); if (size < (man->size << PAGE_SHIFT)) return true; @@ -509,20 +509,31 @@ bool amdgpu_bo_support_uswc(u64 bo_flags) #endif } -static int amdgpu_bo_do_create(struct amdgpu_device *adev, +/** + * amdgpu_bo_create - create an &amdgpu_bo buffer object + * @adev: amdgpu device object + * @bp: parameters to be used for the buffer object + * @bo_ptr: pointer to the buffer object pointer + * + * Creates an &amdgpu_bo buffer object. + * + * Returns: + * 0 for success or a negative error code on failure. + */ +int amdgpu_bo_create(struct amdgpu_device *adev, struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr) { struct ttm_operation_ctx ctx = { .interruptible = (bp->type != ttm_bo_type_kernel), .no_wait_gpu = bp->no_wait_gpu, - .resv = bp->resv, - .flags = bp->type != ttm_bo_type_kernel ? - TTM_OPT_FLAG_ALLOW_RES_EVICT : 0 + /* We opt to avoid OOM on system pages allocations */ + .gfp_retry_mayfail = true, + .allow_res_evict = bp->type != ttm_bo_type_kernel, + .resv = bp->resv }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; - size_t acc_size; int r; /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */ @@ -543,16 +554,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, if (!amdgpu_bo_validate_size(adev, size, bp->domain)) return -ENOMEM; - *bo_ptr = NULL; + BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo)); - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); - - bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); + *bo_ptr = NULL; + bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL); if (bo == NULL) return -ENOMEM; - drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size); - INIT_LIST_HEAD(&bo->shadow_list); + drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? 
bp->preferred_domain : bp->domain; @@ -575,22 +583,25 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, if (bp->type == ttm_bo_type_kernel) bo->tbo.priority = 1; + if (!bp->destroy) + bp->destroy = &amdgpu_bo_destroy; + r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, - &bo->placement, page_align, &ctx, acc_size, - NULL, bp->resv, &amdgpu_bo_destroy); + &bo->placement, page_align, &ctx, NULL, + bp->resv, bp->destroy); if (unlikely(r != 0)) return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.mem.mem_type == TTM_PL_VRAM && - bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) + bo->tbo.resource->mem_type == TTM_PL_VRAM && + bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && - bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { + bo->tbo.resource->mem_type == TTM_PL_VRAM) { struct dma_fence *fence; r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); @@ -621,110 +632,82 @@ fail_unreserve: return r; } -static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, - unsigned long size, - struct amdgpu_bo *bo) +/** + * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object + * @adev: amdgpu device object + * @bp: parameters to be used for the buffer object + * @ubo_ptr: pointer to the buffer object pointer + * + * Create a BO to be used by a user application. + * + * Returns: + * 0 for success or a negative error code on failure. + */ + +int amdgpu_bo_create_user(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_user **ubo_ptr) { - struct amdgpu_bo_param bp; + struct amdgpu_bo *bo_ptr; int r; - if (bo->shadow) - return 0; - - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.domain = AMDGPU_GEM_DOMAIN_GTT; - bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW; - bp.type = ttm_bo_type_kernel; - bp.resv = bo->tbo.base.resv; - - r = amdgpu_bo_do_create(adev, &bp, &bo->shadow); - if (!r) { - bo->shadow->parent = amdgpu_bo_ref(bo); - mutex_lock(&adev->shadow_list_lock); - list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list); - mutex_unlock(&adev->shadow_list_lock); - } + bp->bo_ptr_size = sizeof(struct amdgpu_bo_user); + bp->destroy = &amdgpu_bo_user_destroy; + r = amdgpu_bo_create(adev, bp, &bo_ptr); + if (r) + return r; + *ubo_ptr = to_amdgpu_bo_user(bo_ptr); return r; } /** - * amdgpu_bo_create - create an &amdgpu_bo buffer object + * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object * @adev: amdgpu device object * @bp: parameters to be used for the buffer object - * @bo_ptr: pointer to the buffer object pointer + * @vmbo_ptr: pointer to the buffer object pointer * - * Creates an &amdgpu_bo buffer object; and if requested, also creates a - * shadow object. - * Shadow object is used to backup the original buffer object, and is always - * in GTT. + * Create a BO to be used by GPUVM. * * Returns: * 0 for success or a negative error code on failure.
*/ -int amdgpu_bo_create(struct amdgpu_device *adev, - struct amdgpu_bo_param *bp, - struct amdgpu_bo **bo_ptr) + +int amdgpu_bo_create_vm(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_vm **vmbo_ptr) { - u64 flags = bp->flags; + struct amdgpu_bo *bo_ptr; int r; - bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW; - r = amdgpu_bo_do_create(adev, bp, bo_ptr); + /* bo_ptr_size will be determined by the caller and it depends on + * num of amdgpu_vm_pt entries. + */ + BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm)); + bp->destroy = &amdgpu_bo_vm_destroy; + r = amdgpu_bo_create(adev, bp, &bo_ptr); if (r) return r; - if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { - if (!bp->resv) - WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, - NULL)); - - r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); - - if (!bp->resv) - dma_resv_unlock((*bo_ptr)->tbo.base.resv); - - if (r) - amdgpu_bo_unref(bo_ptr); - } - + *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); + INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); return r; } /** - * amdgpu_bo_validate - validate an &amdgpu_bo buffer object - * @bo: pointer to the buffer object + * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list * - * Sets placement according to domain; and changes placement and caching - * policy of the buffer object according to the placement. - * This is used for validating shadow bos. It calls ttm_bo_validate() to - * make sure the buffer is resident where it needs to be. + * @vmbo: BO that will be inserted into the shadow list * - * Returns: - * 0 for success or a negative error code on failure. + * Insert a BO to the shadow list. */ -int amdgpu_bo_validate(struct amdgpu_bo *bo) +void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) { - struct ttm_operation_ctx ctx = { false, false }; - uint32_t domain; - int r; + struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); - if (bo->pin_count) - return 0; - - domain = bo->preferred_domains; - -retry: - amdgpu_bo_placement_from_domain(bo, domain); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } - - return r; + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + mutex_unlock(&adev->shadow_list_lock); } /** @@ -752,7 +735,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, amdgpu_bo_size(shadow), NULL, fence, - true, false); + true, false, false); } /** @@ -781,12 +764,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); if (r) return r; @@ -896,28 +879,30 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count) { + if (bo->tbo.base.import_attach) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else return -EINVAL; } - /* This assumes only APU display buffers are pinned with (VRAM|GTT). 
- * See function amdgpu_display_supported_domains() - */ - domain = amdgpu_bo_get_preferred_pin_domain(adev, domain); - - if (bo->pin_count) { - uint32_t mem_type = bo->tbo.mem.mem_type; + if (bo->tbo.pin_count) { + uint32_t mem_type = bo->tbo.resource->mem_type; + uint32_t mem_flags = bo->tbo.resource->placement; if (!(domain & amdgpu_mem_type_to_domain(mem_type))) return -EINVAL; - bo->pin_count++; + if ((mem_type == TTM_PL_VRAM) && + (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) && + !(mem_flags & TTM_PL_FLAG_CONTIGUOUS)) + return -EINVAL; + + ttm_bo_pin(&bo->tbo); if (max_offset != 0) { - u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; + u64 domain_start = amdgpu_ttm_domain_start(adev, + mem_type); WARN_ON_ONCE(max_offset < (amdgpu_bo_gpu_offset(bo) - domain_start)); } @@ -925,7 +910,14 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return 0; } - bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + /* This assumes only APU display buffers are pinned with (VRAM|GTT). + * See function amdgpu_display_supported_domains() + */ + domain = amdgpu_bo_get_preferred_domain(adev, domain); + + if (bo->tbo.base.import_attach) + dma_buf_pin(bo->tbo.base.import_attach); + /* force to pin into visible video ram */ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; @@ -941,7 +933,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (!bo->placements[i].lpfn || (lpfn && lpfn < bo->placements[i].lpfn)) bo->placements[i].lpfn = lpfn; - bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); @@ -950,9 +941,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, goto error; } - bo->pin_count = 1; + ttm_bo_pin(&bo->tbo); - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); if (domain == AMDGPU_GEM_DOMAIN_VRAM) { atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size); atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo), @@ -979,6 +970,7 @@ error: */ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) { + bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; return amdgpu_bo_pin_restricted(bo, domain, 0, 0); } @@ -992,53 +984,24 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_unpin(struct amdgpu_bo *bo) +void amdgpu_bo_unpin(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_operation_ctx ctx = { false, false }; - int r, i; - - if (WARN_ON_ONCE(!bo->pin_count)) { - dev_warn(adev->dev, "%p unpin not necessary\n", bo); - return 0; - } - bo->pin_count--; - if (bo->pin_count) - return 0; - amdgpu_bo_subtract_pin_size(bo); - - for (i = 0; i < bo->placement.num_placement; i++) { - bo->placements[i].lpfn = 0; - bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - } - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r)) - dev_err(adev->dev, "%p validate failed for unpin\n", bo); + ttm_bo_unpin(&bo->tbo); + if (bo->tbo.pin_count) + return; - return r; -} + if (bo->tbo.base.import_attach) + dma_buf_unpin(bo->tbo.base.import_attach); -/** - * amdgpu_bo_evict_vram - evict VRAM buffers - * @adev: amdgpu device object - * - * Evicts all VRAM buffers on the lru list of the memory type. - * Mainly used for evicting vram at suspend time. - * - * Returns: - * 0 for success or a negative error code on failure. 
- */ -int amdgpu_bo_evict_vram(struct amdgpu_device *adev) -{ - /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ -#ifndef CONFIG_HIBERNATION - if (adev->flags & AMD_IS_APU) { - /* Useless to evict on IGP chips */ - return 0; + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { + atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); + atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), + &adev->visible_pin_size); + } else if (bo->tbo.resource->mem_type == TTM_PL_TT) { + atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); } -#endif - return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } static const char *amdgpu_vram_names[] = { @@ -1052,6 +1015,7 @@ static const char *amdgpu_vram_names[] = { "DDR3", "DDR4", "GDDR6", + "DDR5" }; /** @@ -1065,13 +1029,17 @@ static const char *amdgpu_vram_names[] = { */ int amdgpu_bo_init(struct amdgpu_device *adev) { - /* reserve PAT memory space to WC for VRAM */ - arch_io_reserve_memtype_wc(adev->gmc.aper_base, - adev->gmc.aper_size); + /* On A+A platform, VRAM can be mapped as WB */ + if (!adev->gmc.xgmi.connected_to_cpu) { + /* reserve PAT memory space to WC for VRAM */ + arch_io_reserve_memtype_wc(adev->gmc.aper_base, + adev->gmc.aper_size); + + /* Add an MTRR for the VRAM */ + adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, + adev->gmc.aper_size); + } - /* Add an MTRR for the VRAM */ - adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, - adev->gmc.aper_size); DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", adev->gmc.mc_vram_size >> 20, (unsigned long long)adev->gmc.aper_size >> 20); @@ -1081,23 +1049,6 @@ int amdgpu_bo_init(struct amdgpu_device *adev) } /** - * amdgpu_bo_late_init - late init - * @adev: amdgpu device object - * - * Calls amdgpu_ttm_late_init() to free resources used earlier during - * initialization. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_late_init(struct amdgpu_device *adev) -{ - amdgpu_ttm_late_init(adev); - - return 0; -} - -/** * amdgpu_bo_fini - tear down memory manager * @adev: amdgpu device object * @@ -1106,27 +1057,6 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev) void amdgpu_bo_fini(struct amdgpu_device *adev) { amdgpu_ttm_fini(adev); - arch_phys_wc_del(adev->gmc.vram_mtrr); - arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); -} - -/** - * amdgpu_bo_fbdev_mmap - mmap fbdev memory - * @bo: &amdgpu_bo buffer object - * @vma: vma as input from the fbdev mmap method - * - * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo. - * - * Returns: - * 0 for success or a negative error code on failure. 
- */ -int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, - struct vm_area_struct *vma) -{ - if (vma->vm_pgoff != 0) - return -EACCES; - - return ttm_bo_mmap_obj(vma, &bo->tbo); } /** @@ -1143,12 +1073,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_bo_user *ubo; + BUG_ON(bo->tbo.type == ttm_bo_type_kernel); if (adev->family <= AMDGPU_FAMILY_CZ && AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6) return -EINVAL; - bo->tiling_flags = tiling_flags; + ubo = to_amdgpu_bo_user(bo); + ubo->tiling_flags = tiling_flags; return 0; } @@ -1162,10 +1095,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) */ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) { + struct amdgpu_bo_user *ubo; + + BUG_ON(bo->tbo.type == ttm_bo_type_kernel); dma_resv_assert_held(bo->tbo.base.resv); + ubo = to_amdgpu_bo_user(bo); if (tiling_flags) - *tiling_flags = bo->tiling_flags; + *tiling_flags = ubo->tiling_flags; } /** @@ -1184,13 +1121,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, uint32_t metadata_size, uint64_t flags) { + struct amdgpu_bo_user *ubo; void *buffer; + BUG_ON(bo->tbo.type == ttm_bo_type_kernel); + ubo = to_amdgpu_bo_user(bo); if (!metadata_size) { - if (bo->metadata_size) { - kfree(bo->metadata); - bo->metadata = NULL; - bo->metadata_size = 0; + if (ubo->metadata_size) { + kfree(ubo->metadata); + ubo->metadata = NULL; + ubo->metadata_size = 0; } return 0; } @@ -1202,10 +1142,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, if (buffer == NULL) return -ENOMEM; - kfree(bo->metadata); - bo->metadata_flags = flags; - bo->metadata = buffer; - bo->metadata_size = metadata_size; + kfree(ubo->metadata); + ubo->metadata_flags = flags; + ubo->metadata = buffer; + ubo->metadata_size = metadata_size; return 0; } @@ -1229,21 +1169,26 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, size_t buffer_size, uint32_t *metadata_size, uint64_t *flags) { + struct amdgpu_bo_user *ubo; + if (!buffer && !metadata_size) return -EINVAL; + BUG_ON(bo->tbo.type == ttm_bo_type_kernel); + ubo = to_amdgpu_bo_user(bo); + if (metadata_size) + *metadata_size = ubo->metadata_size; + if (buffer) { - if (buffer_size < bo->metadata_size) + if (buffer_size < ubo->metadata_size) return -EINVAL; - if (bo->metadata_size) - memcpy(buffer, bo->metadata, bo->metadata_size); + if (ubo->metadata_size) + memcpy(buffer, ubo->metadata, ubo->metadata_size); } - if (metadata_size) - *metadata_size = bo->metadata_size; if (flags) - *flags = bo->metadata_flags; + *flags = ubo->metadata_flags; return 0; } @@ -1260,11 +1205,11 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, */ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; if (!amdgpu_bo_is_amdgpu_bo(bo)) return; @@ -1274,6 +1219,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); + if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && + bo->resource->mem_type != TTM_PL_SYSTEM) + dma_buf_move_notify(abo->tbo.base.dma_buf); + /* remember the eviction */ if (evict) 
atomic64_inc(&adev->num_evictions); @@ -1286,8 +1235,28 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem) +{ + unsigned int domain; + + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + *vram_mem += amdgpu_bo_size(bo); + break; + case AMDGPU_GEM_DOMAIN_GTT: + *gtt_mem += amdgpu_bo_size(bo); + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + *cpu_mem += amdgpu_bo_size(bo); + break; + } +} + /** - * amdgpu_bo_move_notify - notification about a BO being released + * amdgpu_bo_release_notify - notification about a BO being released * @bo: pointer to a buffer object * * Wipes VRAM buffers whose contents should not be leaked before the @@ -1307,7 +1276,13 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (abo->kfd_bo) amdgpu_amdkfd_unreserve_memory_limit(abo); - if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node || + /* We only remove the fence if the resv has individualized. */ + WARN_ON_ONCE(bo->type == ttm_bo_type_kernel + && bo->base.resv != &bo->base._resv); + if (bo->base.resv == &bo->base._resv) + amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); + + if (bo->resource->mem_type != TTM_PL_VRAM || !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; @@ -1333,33 +1308,27 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo *abo; - unsigned long offset, size; + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); + unsigned long offset; int r; - if (!amdgpu_bo_is_amdgpu_bo(bo)) - return 0; - - abo = ttm_to_amdgpu_bo(bo); - /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return 0; - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) <= adev->gmc.visible_vram_size) + offset = bo->resource->start << PAGE_SHIFT; + if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) return 0; /* Can't move a pinned BO to visible VRAM */ - if (abo->pin_count > 0) - return -EINVAL; + if (abo->tbo.pin_count > 0) + return VM_FAULT_SIGBUS; /* hurrah the memory is not visible ! 
*/ atomic64_inc(&adev->num_vram_cpu_page_faults); @@ -1371,15 +1340,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) abo->placement.busy_placement = &abo->placements[1]; r = ttm_bo_validate(bo, &abo->placement, &ctx); - if (unlikely(r != 0)) - return r; + if (unlikely(r == -EBUSY || r == -ERESTARTSYS)) + return VM_FAULT_NOPAGE; + else if (unlikely(r)) + return VM_FAULT_SIGBUS; - offset = bo->mem.start << PAGE_SHIFT; + offset = bo->resource->start << PAGE_SHIFT; /* this should never happen */ - if (bo->mem.mem_type == TTM_PL_VRAM && - (offset + size) > adev->gmc.visible_vram_size) - return -EINVAL; + if (bo->resource->mem_type == TTM_PL_VRAM && + (offset + bo->base.size) > adev->gmc.visible_vram_size) + return VM_FAULT_SIGBUS; + ttm_bo_move_to_lru_tail_unlocked(bo); return 0; } @@ -1403,30 +1375,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, } /** - * amdgpu_sync_wait_resv - Wait for BO reservation fences + * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences * - * @bo: buffer object + * @adev: amdgpu device pointer + * @resv: reservation object to sync to + * @sync_mode: synchronization mode * @owner: fence owner * @intr: Whether the wait is interruptible * + * Extracts the fences from the reservation object and waits for them to finish. + * * Returns: * 0 on success, errno otherwise. */ -int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) +int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode, void *owner, + bool intr) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false); + amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner); r = amdgpu_sync_wait(&sync, intr); amdgpu_sync_free(&sync); - return r; } /** + * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv + * @bo: buffer object to wait for + * @owner: fence owner + * @intr: Whether the wait is interruptible + * + * Wrapper to wait for fences in a BO. + * Returns: + * 0 on success, errno otherwise.
+ */ +int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv, + AMDGPU_SYNC_NE_OWNER, owner, intr); +} + +/** * amdgpu_bo_gpu_offset - return GPU offset of bo * @bo: amdgpu object for which we query the offset * @@ -1438,25 +1432,43 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) */ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM); WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && - !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); - WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && + !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel); + WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET); + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM && !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); - return amdgpu_gmc_sign_extend(bo->tbo.offset); + return amdgpu_bo_gpu_offset_no_check(bo); } /** - * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout + * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo + * @bo: amdgpu object for which we query the offset + * + * Returns: + * current GPU offset of the object without raising warnings. + */ +u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + uint64_t offset; + + offset = (bo->tbo.resource->start << PAGE_SHIFT) + + amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); + + return amdgpu_gmc_sign_extend(offset); +} + +/** + * amdgpu_bo_get_preferred_domain - get preferred domain * @adev: amdgpu device object * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>` * * Returns: - * Which of the allowed domains is preferred for pinning the BO for scanout. + * Which of the allowed domains is preferred for allocating the BO. */ -uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, +uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain) { if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { @@ -1466,3 +1478,76 @@ uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, } return domain; } + +#if defined(CONFIG_DEBUG_FS) +#define amdgpu_bo_print_flag(m, bo, flag) \ + do { \ + if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \ + seq_printf((m), " " #flag); \ + } \ + } while (0) + +/** + * amdgpu_bo_print_info - print BO info in debugfs file + * + * @id: Index or Id of the BO + * @bo: Requested BO for printing info + * @m: debugfs file + * + * Print BO information in debugfs file + * + * Returns: + * Size of the BO in bytes. 
+ */ +u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) +{ + struct dma_buf_attachment *attachment; + struct dma_buf *dma_buf; + unsigned int domain; + const char *placement; + unsigned int pin_count; + u64 size; + + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + placement = "VRAM"; + break; + case AMDGPU_GEM_DOMAIN_GTT: + placement = " GTT"; + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + placement = " CPU"; + break; + } + + size = amdgpu_bo_size(bo); + seq_printf(m, "\t\t0x%08x: %12lld byte %s", + id, size, placement); + + pin_count = READ_ONCE(bo->tbo.pin_count); + if (pin_count) + seq_printf(m, " pin count %d", pin_count); + + dma_buf = READ_ONCE(bo->tbo.base.dma_buf); + attachment = READ_ONCE(bo->tbo.base.import_attach); + + if (attachment) + seq_printf(m, " imported from %p", dma_buf); + else if (dma_buf) + seq_printf(m, " exported as %p", dma_buf); + + amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); + amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS); + amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC); + amdgpu_bo_print_flag(m, bo, VRAM_CLEARED); + amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS); + amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID); + amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC); + + seq_puts(m, "\n"); + + return size; +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 36dec51d1ef1..4c9cbdc66995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -30,6 +30,8 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" +#include "amdgpu_res_cursor.h" + #ifdef CONFIG_MMU_NOTIFIER #include <linux/mmu_notifier.h> #endif @@ -37,15 +39,24 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX #define AMDGPU_BO_MAX_PLACEMENTS 3 +/* BO flag to indicate a KFD userptr BO */ +#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63) +#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62) + +#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo) +#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo) + struct amdgpu_bo_param { unsigned long size; int byte_align; + u32 bo_ptr_size; u32 domain; u32 preferred_domain; u64 flags; enum ttm_bo_type type; bool no_wait_gpu; - struct dma_resv *resv; + struct dma_resv *resv; + void (*destroy)(struct ttm_buffer_object *bo); }; /* bo virtual addresses in a vm */ @@ -89,29 +100,31 @@ struct amdgpu_bo { struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; u64 flags; - unsigned pin_count; - u64 tiling_flags; - u64 metadata_flags; - void *metadata; - u32 metadata_size; - unsigned prime_shared_count; /* per VM structure for page tables and with virtual addresses */ struct amdgpu_vm_bo_base *vm_bo; /* Constant after initialization */ struct amdgpu_bo *parent; - struct amdgpu_bo *shadow; - - struct ttm_bo_kmap_obj dma_buf_vmap; - struct amdgpu_mn *mn; - #ifdef CONFIG_MMU_NOTIFIER struct mmu_interval_notifier notifier; #endif + struct kgd_mem *kfd_bo; +}; - struct list_head shadow_list; +struct amdgpu_bo_user { + struct amdgpu_bo bo; + u64 tiling_flags; + u64 metadata_flags; + void *metadata; + u32 metadata_size; - struct kgd_mem *kfd_bo; +}; + +struct amdgpu_bo_vm { + struct amdgpu_bo bo; + struct amdgpu_bo *shadow; + struct list_head shadow_list; + struct amdgpu_vm_bo_base entries[]; }; static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) @@ -160,7 +173,7 @@ static inline int amdgpu_bo_reserve(struct 
amdgpu_bo *bo, bool no_intr) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r; - r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(adev->dev, "%p reserve failed\n", bo); @@ -176,17 +189,17 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) { - return bo->tbo.num_pages << PAGE_SHIFT; + return bo->tbo.base.size; } static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo) { - return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; + return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE; } static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo) { - return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; + return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; } /** @@ -206,18 +219,19 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; - struct drm_mm_node *node = bo->tbo.mem.mm_node; - unsigned long pages_left; + struct amdgpu_res_cursor cursor; - if (bo->tbo.mem.mem_type != TTM_PL_VRAM) + if (bo->tbo.resource->mem_type != TTM_PL_VRAM) return false; - for (pages_left = bo->tbo.mem.num_pages; pages_left; - pages_left -= node->size, node++) - if (node->start < fpfn) + amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); + while (cursor.remaining) { + if (cursor.start < adev->gmc.visible_vram_size) return true; + amdgpu_res_next(&cursor, cursor.size); + } + return false; } @@ -229,6 +243,33 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; } +/** + * amdgpu_bo_encrypted - test if the BO is encrypted + * @bo: pointer to a buffer object + * + * Return true if the buffer object is encrypted, false otherwise. + */ +static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo) +{ + return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED; +} + +/** + * amdgpu_bo_shadowed - check if the BO is shadowed + * + * @bo: BO to be tested. + * + * Returns: + * NULL if not shadowed or else return a BO pointer. 
+ */ +static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo) +{ + if (bo->tbo.type == ttm_bo_type_kernel) + return to_amdgpu_bo_vm(bo)->shadow; + + return NULL; +} + bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain); @@ -246,6 +287,12 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, uint64_t offset, uint64_t size, uint32_t domain, struct amdgpu_bo **bo_ptr, void **cpu_addr); +int amdgpu_bo_create_user(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_user **ubo_ptr); +int amdgpu_bo_create_vm(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_vm **ubo_ptr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); @@ -256,13 +303,9 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo); int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 min_offset, u64 max_offset); -int amdgpu_bo_unpin(struct amdgpu_bo *bo); -int amdgpu_bo_evict_vram(struct amdgpu_device *adev); +void amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_init(struct amdgpu_device *adev); -int amdgpu_bo_late_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); -int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, - struct vm_area_struct *vma); int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags); void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags); int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, @@ -272,17 +315,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, uint64_t *flags); void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); -int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); +vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); +int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode, void *owner, + bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); -int amdgpu_bo_validate(struct amdgpu_bo *bo); +u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem); +void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence); -uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, +uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); /* @@ -315,7 +364,9 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, #if defined(CONFIG_DEBUG_FS) void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); +u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m); #endif +void amdgpu_debugfs_sa_init(struct amdgpu_device *adev); bool amdgpu_bo_support_uswc(u64 bo_flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c index 1f2305b7bd13..4eaec446b49d 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c @@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den, * Calculate feedback and reference divider for a given post divider. Makes * sure we stay within the limits. */ -static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, - unsigned fb_div_max, unsigned ref_div_max, - unsigned *fb_div, unsigned *ref_div) +static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom, + unsigned int den, unsigned int post_div, + unsigned int fb_div_max, unsigned int ref_div_max, + unsigned int *fb_div, unsigned int *ref_div) { + /* limit reference * post divider to a maximum */ - ref_div_max = min(128 / post_div, ref_div_max); + if (adev->family == AMDGPU_FAMILY_SI) + ref_div_max = min(100 / post_div, ref_div_max); + else + ref_div_max = min(128 / post_div, ref_div_max); /* get matching reference and feedback divider */ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); @@ -102,16 +107,18 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_ * amdgpu_pll_compute - compute PLL parameters * * @pll: information about the PLL + * @freq: requested frequency * @dot_clock_p: resulting pixel clock - * fb_div_p: resulting feedback divider - * frac_fb_div_p: fractional part of the feedback divider - * ref_div_p: resulting reference divider - * post_div_p: resulting reference divider + * @fb_div_p: resulting feedback divider + * @frac_fb_div_p: fractional part of the feedback divider + * @ref_div_p: resulting reference divider + * @post_div_p: resulting post divider * * Try to calculate the PLL parameters to generate the given frequency: * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) */ -void amdgpu_pll_compute(struct amdgpu_pll *pll, +void amdgpu_pll_compute(struct amdgpu_device *adev, + struct amdgpu_pll *pll, u32 freq, u32 *dot_clock_p, u32 *fb_div_p, @@ -198,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll, for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { unsigned diff; - amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, + amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max, &fb_div, &ref_div); diff = abs(target_clock - (pll->reference_freq * fb_div) / (ref_div * post_div)); @@ -213,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll, post_div = post_div_best; /* get the feedback and reference divider for the optimal value */ - amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max, + amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max, &fb_div, &ref_div); /* reduce the numbers to a simpler ratio once more */ @@ -308,7 +315,6 @@ int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc) * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc * * @crtc: drm crtc - * @encoder: drm encoder * * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can * be shared (i.e., same clock).
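To make the documented relation concrete, here is a minimal, self-contained sketch that evaluates dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) with made-up divider values; it merely illustrates the kernel-doc comment above and is not part of this patch:

/* Illustrative only: evaluates the documented PLL relation with
 * hypothetical example values; not driver code. */
#include <stdio.h>

int main(void)
{
        unsigned int ref_freq = 27000; /* reference clock in kHz (hypothetical) */
        unsigned int fb_div = 100;     /* feedback divider */
        unsigned int ref_div = 3;      /* reference divider */
        unsigned int post_div = 4;     /* post divider */

        /* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) */
        unsigned int dot_clock = (ref_freq * fb_div) / (ref_div * post_div);

        printf("dot clock: %u kHz\n", dot_clock); /* prints 225000 */
        return 0;
}

Note how the hunk above additionally clamps ref_div_max so that reference divider times post divider stays within a hardware bound, with a tighter limit (100 instead of 128) on SI-family parts.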
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h index db6136f68b82..44a583d6c9b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h @@ -24,7 +24,8 @@ #ifndef __AMDGPU_PLL_H__ #define __AMDGPU_PLL_H__ -void amdgpu_pll_compute(struct amdgpu_pll *pll, +void amdgpu_pll_compute(struct amdgpu_device *adev, + struct amdgpu_pll *pll, u32 freq, u32 *dot_clock_p, u32 *fb_div_p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c deleted file mode 100644 index f205f56e3358..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ /dev/null @@ -1,3207 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Rafał Miłecki <zajec5@gmail.com> - * Alex Deucher <alexdeucher@gmail.com> - */ - -#include <drm/drm_debugfs.h> - -#include "amdgpu.h" -#include "amdgpu_drv.h" -#include "amdgpu_pm.h" -#include "amdgpu_dpm.h" -#include "amdgpu_display.h" -#include "amdgpu_smu.h" -#include "atom.h" -#include <linux/power_supply.h> -#include <linux/pci.h> -#include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> -#include <linux/nospec.h> -#include "hwmgr.h" -#define WIDTH_4K 3840 - -static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); - -static const struct cg_flag_name clocks[] = { - {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, - {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, - {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, - {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, - {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, - {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, - {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, - {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, - {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, - {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, - {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"}, - {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, - - {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, - {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, - {0, NULL}, -}; - -static const struct hwmon_temp_label { - enum PP_HWMON_TEMP channel; - const char *label; -} temp_label[] = { - {PP_TEMP_EDGE, "edge"}, - {PP_TEMP_JUNCTION, "junction"}, - {PP_TEMP_MEM, "mem"}, -}; - -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) -{ - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - if (adev->powerplay.pp_funcs->enable_bapm) - amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); - mutex_unlock(&adev->pm.mutex); - } -} - -int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, - void *data, uint32_t *size) -{ - int ret = 0; - - if (!data || !size) - return -EINVAL; - - if (is_support_sw_smu(adev)) - ret = smu_read_sensor(&adev->smu, sensor, data, size); - else { - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) - ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, - sensor, data, size); - else - ret = -EINVAL; - } - -
return ret; -} - -/** - * DOC: power_dpm_state - * - * The power_dpm_state file is a legacy interface and is only provided for - * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting - * certain power related parameters. The file power_dpm_state is used for this. - * It accepts the following arguments: - * - * - battery - * - * - balanced - * - * - performance - * - * battery - * - * On older GPUs, the vbios provided a special power state for battery - * operation. Selecting battery switched to this state. This is no - * longer provided on newer GPUs so the option does nothing in that case. - * - * balanced - * - * On older GPUs, the vbios provided a special power state for balanced - * operation. Selecting balanced switched to this state. This is no - * longer provided on newer GPUs so the option does nothing in that case. - * - * performance - * - * On older GPUs, the vbios provided a special power state for performance - * operation. Selecting performance switched to this state. This is no - * longer provided on newer GPUs so the option does nothing in that case. - * - */ - -static ssize_t amdgpu_get_dpm_state(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - enum amd_pm_state_type pm; - - if (is_support_sw_smu(adev)) { - if (adev->smu.ppt_funcs->get_current_power_state) - pm = smu_get_current_power_state(&adev->smu); - else - pm = adev->pm.dpm.user_state; - } else if (adev->powerplay.pp_funcs->get_current_power_state) { - pm = amdgpu_dpm_get_current_power_state(adev); - } else { - pm = adev->pm.dpm.user_state; - } - - return snprintf(buf, PAGE_SIZE, "%s\n", - (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : - (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); -} - -static ssize_t amdgpu_set_dpm_state(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - enum amd_pm_state_type state; - - if (strncmp("battery", buf, strlen("battery")) == 0) - state = POWER_STATE_TYPE_BATTERY; - else if (strncmp("balanced", buf, strlen("balanced")) == 0) - state = POWER_STATE_TYPE_BALANCED; - else if (strncmp("performance", buf, strlen("performance")) == 0) - state = POWER_STATE_TYPE_PERFORMANCE; - else { - count = -EINVAL; - goto fail; - } - - if (is_support_sw_smu(adev)) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.user_state = state; - mutex_unlock(&adev->pm.mutex); - } else if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.user_state = state; - mutex_unlock(&adev->pm.mutex); - - /* Can't set dpm state when the card is off */ - if (!(adev->flags & AMD_IS_PX) || - (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) - amdgpu_pm_compute_clocks(adev); - } -fail: - return count; -} - - -/** - * DOC: power_dpm_force_performance_level - * - * The amdgpu driver provides a sysfs API for adjusting certain power - * related parameters. The file power_dpm_force_performance_level is - * used for this. 
It accepts the following arguments: - * - * - auto - * - * - low - * - * - high - * - * - manual - * - * - profile_standard - * - * - profile_min_sclk - * - * - profile_min_mclk - * - * - profile_peak - * - * auto - * - * When auto is selected, the driver will attempt to dynamically select - * the optimal power profile for current conditions in the driver. - * - * low - * - * When low is selected, the clocks are forced to the lowest power state. - * - * high - * - * When high is selected, the clocks are forced to the highest power state. - * - * manual - * - * When manual is selected, the user can manually adjust which power states - * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, - * and pp_dpm_pcie files and adjust the power state transition heuristics - * via the pp_power_profile_mode sysfs file. - * - * profile_standard - * profile_min_sclk - * profile_min_mclk - * profile_peak - * - * When the profiling modes are selected, clock and power gating are - * disabled and the clocks are set for different profiling cases. This - * mode is recommended for profiling specific workloads where you do - * not want clock or power gating or clock fluctuations to interfere - * with your results. profile_standard sets the clocks to a fixed clock - * level which varies from asic to asic. profile_min_sclk forces the sclk - * to the lowest level. profile_min_mclk forces the mclk to the lowest level. - * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. - * - */ - -static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - enum amd_dpm_forced_level level = 0xff; - - if (amdgpu_sriov_vf(adev)) - return 0; - - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return snprintf(buf, PAGE_SIZE, "off\n"); - - if (is_support_sw_smu(adev)) - level = smu_get_performance_level(&adev->smu); - else if (adev->powerplay.pp_funcs->get_performance_level) - level = amdgpu_dpm_get_performance_level(adev); - else - level = adev->pm.dpm.forced_level; - - return snprintf(buf, PAGE_SIZE, "%s\n", - (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : - (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : - (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : - (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ?
"profile_peak" : - "unknown"); -} - -static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level = 0xff; - int ret = 0; - - /* Can't force performance level when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - if (strncmp("low", buf, strlen("low")) == 0) { - level = AMD_DPM_FORCED_LEVEL_LOW; - } else if (strncmp("high", buf, strlen("high")) == 0) { - level = AMD_DPM_FORCED_LEVEL_HIGH; - } else if (strncmp("auto", buf, strlen("auto")) == 0) { - level = AMD_DPM_FORCED_LEVEL_AUTO; - } else if (strncmp("manual", buf, strlen("manual")) == 0) { - level = AMD_DPM_FORCED_LEVEL_MANUAL; - } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { - level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; - } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { - level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; - } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { - level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; - } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { - level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; - } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { - level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - } else { - count = -EINVAL; - goto fail; - } - - /* handle sriov case here */ - if (amdgpu_sriov_vf(adev)) { - if (amdgim_is_hwperf(adev) && - adev->virt.ops->force_dpm_level) { - mutex_lock(&adev->pm.mutex); - adev->virt.ops->force_dpm_level(adev, level); - mutex_unlock(&adev->pm.mutex); - return count; - } else { - return -EINVAL; - } - } - - if (is_support_sw_smu(adev)) - current_level = smu_get_performance_level(&adev->smu); - else if (adev->powerplay.pp_funcs->get_performance_level) - current_level = amdgpu_dpm_get_performance_level(adev); - - if (current_level == level) - return count; - - /* profile_exit setting is valid only when current mode is in profile mode */ - if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && - (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { - pr_err("Currently not in any profile mode!\n"); - return -EINVAL; - } - - if (is_support_sw_smu(adev)) { - ret = smu_force_performance_level(&adev->smu, level); - if (ret) - count = -EINVAL; - } else if (adev->powerplay.pp_funcs->force_performance_level) { - mutex_lock(&adev->pm.mutex); - if (adev->pm.dpm.thermal_active) { - count = -EINVAL; - mutex_unlock(&adev->pm.mutex); - goto fail; - } - ret = amdgpu_dpm_force_performance_level(adev, level); - if (ret) - count = -EINVAL; - else - adev->pm.dpm.forced_level = level; - mutex_unlock(&adev->pm.mutex); - } - -fail: - return count; -} - -static ssize_t amdgpu_get_pp_num_states(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - struct pp_states_info data; - int i, buf_len, ret; - - if (is_support_sw_smu(adev)) { - ret = smu_get_power_num_states(&adev->smu, &data); - if (ret) - return ret; - } else if (adev->powerplay.pp_funcs->get_pp_num_states) - amdgpu_dpm_get_pp_num_states(adev, &data); 
- - buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); - for (i = 0; i < data.nums; i++) - buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, - (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : - (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : - (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : - (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); - - return buf_len; -} - -static ssize_t amdgpu_get_pp_cur_state(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - struct pp_states_info data; - struct smu_context *smu = &adev->smu; - enum amd_pm_state_type pm = 0; - int i = 0, ret = 0; - - if (is_support_sw_smu(adev)) { - pm = smu_get_current_power_state(smu); - ret = smu_get_power_num_states(smu, &data); - if (ret) - return ret; - } else if (adev->powerplay.pp_funcs->get_current_power_state - && adev->powerplay.pp_funcs->get_pp_num_states) { - pm = amdgpu_dpm_get_current_power_state(adev); - amdgpu_dpm_get_pp_num_states(adev, &data); - } - - for (i = 0; i < data.nums; i++) { - if (pm == data.states[i]) - break; - } - - if (i == data.nums) - i = -EINVAL; - - return snprintf(buf, PAGE_SIZE, "%d\n", i); -} - -static ssize_t amdgpu_get_pp_force_state(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (adev->pp_force_state_enabled) - return amdgpu_get_pp_cur_state(dev, attr, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_force_state(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - enum amd_pm_state_type state = 0; - unsigned long idx; - int ret; - - if (strlen(buf) == 1) - adev->pp_force_state_enabled = false; - else if (is_support_sw_smu(adev)) - adev->pp_force_state_enabled = false; - else if (adev->powerplay.pp_funcs->dispatch_tasks && - adev->powerplay.pp_funcs->get_pp_num_states) { - struct pp_states_info data; - - ret = kstrtoul(buf, 0, &idx); - if (ret || idx >= ARRAY_SIZE(data.states)) { - count = -EINVAL; - goto fail; - } - idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - - amdgpu_dpm_get_pp_num_states(adev, &data); - state = data.states[idx]; - /* only set user selected power states */ - if (state != POWER_STATE_TYPE_INTERNAL_BOOT && - state != POWER_STATE_TYPE_DEFAULT) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_TASK_ENABLE_USER_STATE, &state); - adev->pp_force_state_enabled = true; - } - } -fail: - return count; -} - -/** - * DOC: pp_table - * - * The amdgpu driver provides a sysfs API for uploading new powerplay - * tables. The file pp_table is used for this. Reading the file - * will dump the current power play table. Writing to the file - * will attempt to upload a new powerplay table and re-initialize - * powerplay using that new table. 
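The pp_table interface described just above is a binary read/write sysfs attribute. As a rough userspace sketch of reading it back (card0 and the one-page buffer are assumptions; this is an editor's illustration, not shipped tooling):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* card0 is an assumption; adjust for your system */
        const char *path = "/sys/class/drm/card0/device/pp_table";
        char table[4096]; /* sysfs show() returns at most one page */
        int fd = open(path, O_RDONLY);
        ssize_t n;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        n = read(fd, table, sizeof(table));
        if (n < 0)
                perror("read");
        else
                printf("read %zd bytes of powerplay table\n", n);
        close(fd);
        return 0;
}

Writing a new table through the same file re-initializes powerplay, as the DOC text says.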
- * - */ - -static ssize_t amdgpu_get_pp_table(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - char *table = NULL; - int size; - - if (is_support_sw_smu(adev)) { - size = smu_sys_get_pp_table(&adev->smu, (void **)&table); - if (size < 0) - return size; - } - else if (adev->powerplay.pp_funcs->get_pp_table) - size = amdgpu_dpm_get_pp_table(adev, &table); - else - return 0; - - if (size >= PAGE_SIZE) - size = PAGE_SIZE - 1; - - memcpy(buf, table, size); - - return size; -} - -static ssize_t amdgpu_set_pp_table(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret = 0; - - if (is_support_sw_smu(adev)) { - ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); - if (ret) - return ret; - } else if (adev->powerplay.pp_funcs->set_pp_table) - amdgpu_dpm_set_pp_table(adev, buf, count); - - return count; -} - -/** - * DOC: pp_od_clk_voltage - * - * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages - * in each power level within a power state. The file pp_od_clk_voltage is used for - * this. - * - * < For Vega10 and previous ASICs > - * - * Reading the file will display: - * - * - a list of engine clock levels and voltages labeled OD_SCLK - * - * - a list of memory clock levels and voltages labeled OD_MCLK - * - * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE - * - * To manually adjust these settings, first select manual using - * power_dpm_force_performance_level. Enter a new value for each - * level by writing a string that contains "s/m level clock voltage" to - * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz - * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at - * 810 mV. When you have edited all of the states as needed, write - * "c" (commit) to the file to commit your changes. If you want to reset to the - * default power levels, write "r" (reset) to the file to reset them. - * - * - * < For Vega20 > - * - * Reading the file will display: - * - * - minimum and maximum engine clock labeled OD_SCLK - * - * - maximum memory clock labeled OD_MCLK - * - * - three <frequency, voltage> points labeled OD_VDDC_CURVE. - * They can be used to calibrate the sclk voltage curve. - * - * - a list of valid ranges for sclk, mclk, and voltage curve points - * labeled OD_RANGE - * - * To manually adjust these settings: - * - * - First select manual using power_dpm_force_performance_level - * - * - For clock frequency setting, enter a new value by writing a - * string that contains "s/m index clock" to the file. The index - * should be 0 to set the minimum clock and 1 to set the maximum - * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. - * "m 1 800" will update maximum mclk to be 800 MHz. - * - * - For sclk voltage curve, enter the new values by writing a - * string that contains "vc point clock voltage" to the file. The - * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will - * update point 1 with clock set as 300 MHz and voltage as - * 600 mV. "vc 2 1000 1000" will update point 3 with clock set - * as 1000 MHz and voltage 1000 mV.
- * - * - When you have edited all of the states as needed, write "c" (commit) - * to the file to commit your changes. - * - * - If you want to reset to the default power levels, write "r" (reset) - * to the file to reset them. - * - */ - -static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t parameter_size = 0; - long parameter[64]; - char buf_cpy[128]; - char *tmp_str; - char *sub_str; - const char delimiter[3] = {' ', '\n', '\0'}; - uint32_t type; - - if (count > 127) - return -EINVAL; - - if (*buf == 's') - type = PP_OD_EDIT_SCLK_VDDC_TABLE; - else if (*buf == 'm') - type = PP_OD_EDIT_MCLK_VDDC_TABLE; - else if (*buf == 'r') - type = PP_OD_RESTORE_DEFAULT_TABLE; - else if (*buf == 'c') - type = PP_OD_COMMIT_DPM_TABLE; - else if (!strncmp(buf, "vc", 2)) - type = PP_OD_EDIT_VDDC_CURVE; - else - return -EINVAL; - - memcpy(buf_cpy, buf, count+1); - - tmp_str = buf_cpy; - - if (type == PP_OD_EDIT_VDDC_CURVE) - tmp_str++; - while (isspace(*++tmp_str)); - - while (tmp_str[0]) { - sub_str = strsep(&tmp_str, delimiter); - ret = kstrtol(sub_str, 0, &parameter[parameter_size]); - if (ret) - return -EINVAL; - parameter_size++; - - while (isspace(*tmp_str)) - tmp_str++; - } - - if (is_support_sw_smu(adev)) { - ret = smu_od_edit_dpm_table(&adev->smu, type, - parameter, parameter_size); - - if (ret) - return -EINVAL; - } else { - if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { - ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, - parameter, parameter_size); - if (ret) - return -EINVAL; - } - - if (type == PP_OD_COMMIT_DPM_TABLE) { - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_TASK_READJUST_POWER_STATE, - NULL); - return count; - } else { - return -EINVAL; - } - } - } - - return count; -} - -static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint32_t size = 0; - - if (is_support_sw_smu(adev)) { - size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); - size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); - size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); - size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); - return size; - } else if (adev->powerplay.pp_funcs->print_clock_levels) { - size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); - size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); - size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); - return size; - } else { - return snprintf(buf, PAGE_SIZE, "\n"); - } - -} - -/** - * DOC: pp_features - * - * The amdgpu driver provides a sysfs API for adjusting which powerplay - * features are enabled. The file pp_features is used for this. This - * interface is only available for Vega10 and later dGPUs. - * - * Reading back the file will show the following: - * - Current ppfeature masks - * - A list of all supported powerplay features with their names, - * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled"). - * - * To manually enable or disable a specific feature, set or clear - * the corresponding bit in the original ppfeature mask and write the - * new mask back.
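Building that new mask is plain bit arithmetic; a small sketch with invented values (the real mask and the meaning of each bit come from reading pp_features first):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative: pretend pp_features currently reads back 0x7fff
         * and we want to disable the feature at bit 14. */
        uint64_t mask = 0x7fffULL;

        mask &= ~(1ULL << 14); /* clear the bit to disable that feature */
        /* the resulting value is what you would write back to pp_features */
        printf("0x%016llx\n", (unsigned long long)mask);
        return 0;
}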
- */ -static ssize_t amdgpu_set_pp_feature_status(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint64_t featuremask; - int ret; - - ret = kstrtou64(buf, 0, &featuremask); - if (ret) - return -EINVAL; - - pr_debug("featuremask = 0x%llx\n", featuremask); - - if (is_support_sw_smu(adev)) { - ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); - if (ret) - return -EINVAL; - } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { - ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); - if (ret) - return -EINVAL; - } - - return count; -} - -static ssize_t amdgpu_get_pp_feature_status(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) { - return smu_sys_get_pp_feature_mask(&adev->smu, buf); - } else if (adev->powerplay.pp_funcs->get_ppfeature_status) - return amdgpu_dpm_get_ppfeature_status(adev, buf); - - return snprintf(buf, PAGE_SIZE, "\n"); -} - -/** - * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie - * - * The amdgpu driver provides a sysfs API for adjusting which power levels - * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, - * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for - * this. - * - * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for - * Vega10 and later ASICs. - * pp_dpm_fclk interface is only available for Vega20 and later ASICs. - * - * Reading back the files will show you the available power levels within - * the power state and the clock information for those levels. - * - * To manually adjust these states, first select manual using - * power_dpm_force_performance_level. - * Secondly, enter a new value for each level by writing a string of - * the form "echo xx xx xx > pp_dpm_sclk/mclk/pcie". - * E.g., - * - * .. code-block:: bash - * - * echo "4 5 6" > pp_dpm_sclk - * - * will enable sclk levels 4, 5, and 6. - * - * NOTE: changing the dcefclk max dpm level is not currently supported - */ - -static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -/* - * Worst case: 32 bits individually specified, in octal at 12 characters - * per line (+1 for \n).
- */ -#define AMDGPU_MASK_BUF_MAX (32 * 13) - -static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) -{ - int ret; - long level; - char *sub_str = NULL; - char *tmp; - char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; - const char delimiter[3] = {' ', '\n', '\0'}; - size_t bytes; - - *mask = 0; - - bytes = min(count, sizeof(buf_cpy) - 1); - memcpy(buf_cpy, buf, bytes); - buf_cpy[bytes] = '\0'; - tmp = buf_cpy; - while (tmp[0]) { - sub_str = strsep(&tmp, delimiter); - if (strlen(sub_str)) { - ret = kstrtol(sub_str, 0, &level); - if (ret) - return -EINVAL; - *mask |= 1 << level; - } else - break; - } - - return 0; -} - -static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - if (amdgpu_sriov_vf(adev)) - return 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - if (amdgpu_sriov_vf(adev)) - return 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, 
PP_SOCCLK, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) - return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); - else if (adev->powerplay.pp_funcs->print_clock_levels) - return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); - else - return snprintf(buf, PAGE_SIZE, "\n"); -} - -static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - uint32_t mask = 0; - - ret = amdgpu_read_mask(buf, count, &mask); - if (ret) - return ret; - - if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); - else if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); - - if (ret) - return -EINVAL; - - return count; -} - -static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint32_t value = 0; - - if 
(is_support_sw_smu(adev)) - value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); - else if (adev->powerplay.pp_funcs->get_sclk_od) - value = amdgpu_dpm_get_sclk_od(adev); - - return snprintf(buf, PAGE_SIZE, "%d\n", value); -} - -static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - long int value; - - ret = kstrtol(buf, 0, &value); - - if (ret) { - count = -EINVAL; - goto fail; - } - - if (is_support_sw_smu(adev)) { - value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); - } else { - if (adev->powerplay.pp_funcs->set_sclk_od) - amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); - - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); - } else { - adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); - } - } - -fail: - return count; -} - -static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint32_t value = 0; - - if (is_support_sw_smu(adev)) - value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); - else if (adev->powerplay.pp_funcs->get_mclk_od) - value = amdgpu_dpm_get_mclk_od(adev); - - return snprintf(buf, PAGE_SIZE, "%d\n", value); -} - -static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int ret; - long int value; - - ret = kstrtol(buf, 0, &value); - - if (ret) { - count = -EINVAL; - goto fail; - } - - if (is_support_sw_smu(adev)) { - value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); - } else { - if (adev->powerplay.pp_funcs->set_mclk_od) - amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); - - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); - } else { - adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); - } - } - -fail: - return count; -} - -/** - * DOC: pp_power_profile_mode - * - * The amdgpu driver provides a sysfs API for adjusting the heuristics - * related to switching between power levels in a power state. The file - * pp_power_profile_mode is used for this. - * - * Reading this file outputs a list of all of the predefined power profiles - * and the relevant heuristics settings for that profile. - * - * To select a profile or create a custom profile, first select manual using - * power_dpm_force_performance_level. Writing the number of a predefined - * profile to pp_power_profile_mode will enable those heuristics. To - * create a custom set of heuristics, write a string of numbers to the file - * starting with the number of the custom profile along with a setting - * for each heuristic parameter. Due to differences across asic families - * the heuristic parameters vary from family to family. 
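As a concrete illustration of the profile-mode text above: the list of predefined profiles is read back from the file, and a profile is selected by writing its number. In this sketch, card0 and profile index 1 are assumptions, and manual performance level must be selected first, as the DOC text says:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* card0 and profile index 1 are illustrative assumptions */
        const char *path =
                "/sys/class/drm/card0/device/pp_power_profile_mode";
        char list[4096];
        int fd = open(path, O_RDONLY);
        ssize_t n;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        n = read(fd, list, sizeof(list) - 1);
        if (n > 0) {
                list[n] = '\0';
                fputs(list, stdout); /* table of predefined profiles */
        }
        close(fd);

        fd = open(path, O_WRONLY);
        if (fd >= 0) {
                /* select predefined profile 1 (manual level required first) */
                if (write(fd, "1", 1) < 0)
                        perror("write");
                close(fd);
        }
        return 0;
}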
- * - */ - -static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (is_support_sw_smu(adev)) - return smu_get_power_profile_mode(&adev->smu, buf); - else if (adev->powerplay.pp_funcs->get_power_profile_mode) - return amdgpu_dpm_get_power_profile_mode(adev, buf); - - return snprintf(buf, PAGE_SIZE, "\n"); -} - - -static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - int ret = 0xff; - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint32_t parameter_size = 0; - long parameter[64]; - char *sub_str, buf_cpy[128]; - char *tmp_str; - uint32_t i = 0; - char tmp[2]; - long int profile_mode = 0; - const char delimiter[3] = {' ', '\n', '\0'}; - - tmp[0] = *(buf); - tmp[1] = '\0'; - ret = kstrtol(tmp, 0, &profile_mode); - if (ret) - goto fail; - - if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (count < 2 || count > 127) - return -EINVAL; - while (isspace(*++buf)) - i++; - memcpy(buf_cpy, buf, count-i); - tmp_str = buf_cpy; - while (tmp_str[0]) { - sub_str = strsep(&tmp_str, delimiter); - ret = kstrtol(sub_str, 0, &parameter[parameter_size]); - if (ret) { - count = -EINVAL; - goto fail; - } - parameter_size++; - while (isspace(*tmp_str)) - tmp_str++; - } - } - parameter[parameter_size] = profile_mode; - if (is_support_sw_smu(adev)) - ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); - else if (adev->powerplay.pp_funcs->set_power_profile_mode) - ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); - if (!ret) - return count; -fail: - return -EINVAL; -} - -/** - * DOC: busy_percent - * - * The amdgpu driver provides a sysfs API for reading how busy the GPU - * is as a percentage. The file gpu_busy_percent is used for this. - * The SMU firmware computes a percentage of load based on the - * aggregate activity level in the IP cores. - */ -static ssize_t amdgpu_get_busy_percent(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int r, value, size = sizeof(value); - - /* read the IP busy sensor */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, - (void *)&value, &size); - - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", value); -} - -/** - * DOC: mem_busy_percent - * - * The amdgpu driver provides a sysfs API for reading how busy the VRAM - * is as a percentage. The file mem_busy_percent is used for this. - * The SMU firmware computes a percentage of load based on the - * aggregate activity level in the IP cores. - */ -static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - int r, value, size = sizeof(value); - - /* read the memory busy sensor */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, - (void *)&value, &size); - - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", value); -} - -/** - * DOC: pcie_bw - * - * The amdgpu driver provides a sysfs API for estimating how much data - * has been received and sent by the GPU in the last second through PCIe. - * The file pcie_bw is used for this.
- * The Perf counters count the number of received and sent messages and return - * those values, as well as the maximum payload size of a PCIe packet (mps). - * Note that it is not possible to easily and quickly obtain the size of each - * packet transmitted, so we output the max payload size (mps) to allow for - * quick estimation of the PCIe bandwidth usage. - */ -static ssize_t amdgpu_get_pcie_bw(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - uint64_t count0, count1; - - amdgpu_asic_get_pcie_usage(adev, &count0, &count1); - return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", - count0, count1, pcie_get_mps(adev->pdev)); -} - -/** - * DOC: unique_id - * - * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU. - * The file unique_id is used for this. - * This provides a unique ID that persists from machine to machine. - * - * NOTE: This will only work for GFX9 and newer. This file will be absent - * on unsupported ASICs (GFX8 and older). - */ -static ssize_t amdgpu_get_unique_id(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - - if (adev->unique_id) - return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); - - return 0; -} - -static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); -static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, - amdgpu_get_dpm_forced_performance_level, - amdgpu_set_dpm_forced_performance_level); -static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); -static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); -static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, - amdgpu_get_pp_force_state, - amdgpu_set_pp_force_state); -static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, - amdgpu_get_pp_table, - amdgpu_set_pp_table); -static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_sclk, - amdgpu_set_pp_dpm_sclk); -static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_mclk, - amdgpu_set_pp_dpm_mclk); -static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_socclk, - amdgpu_set_pp_dpm_socclk); -static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_fclk, - amdgpu_set_pp_dpm_fclk); -static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_dcefclk, - amdgpu_set_pp_dpm_dcefclk); -static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, - amdgpu_get_pp_dpm_pcie, - amdgpu_set_pp_dpm_pcie); -static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, - amdgpu_get_pp_sclk_od, - amdgpu_set_pp_sclk_od); -static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, - amdgpu_get_pp_mclk_od, - amdgpu_set_pp_mclk_od); -static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR, - amdgpu_get_pp_power_profile_mode, - amdgpu_set_pp_power_profile_mode); -static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR, - amdgpu_get_pp_od_clk_voltage, - amdgpu_set_pp_od_clk_voltage); -static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, - amdgpu_get_busy_percent, NULL); -static DEVICE_ATTR(mem_busy_percent, S_IRUGO, - amdgpu_get_memory_busy_percent, NULL); -static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); -static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR, - amdgpu_get_pp_feature_status, - amdgpu_set_pp_feature_status); -static DEVICE_ATTR(unique_id, S_IRUGO,
amdgpu_get_unique_id, NULL); - -static ssize_t amdgpu_hwmon_show_temp(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - int channel = to_sensor_dev_attr(attr)->index; - int r, temp = 0, size = sizeof(temp); - - /* Can't get temperature when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - if (channel >= PP_TEMP_MAX) - return -EINVAL; - - switch (channel) { - case PP_TEMP_JUNCTION: - /* get current junction temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, - (void *)&temp, &size); - if (r) - return r; - break; - case PP_TEMP_EDGE: - /* get current edge temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, - (void *)&temp, &size); - if (r) - return r; - break; - case PP_TEMP_MEM: - /* get current memory temperature */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, - (void *)&temp, &size); - if (r) - return r; - break; - } - - return snprintf(buf, PAGE_SIZE, "%d\n", temp); -} - -static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int hyst = to_sensor_dev_attr(attr)->index; - int temp; - - if (hyst) - temp = adev->pm.dpm.thermal.min_temp; - else - temp = adev->pm.dpm.thermal.max_temp; - - return snprintf(buf, PAGE_SIZE, "%d\n", temp); -} - -static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int hyst = to_sensor_dev_attr(attr)->index; - int temp; - - if (hyst) - temp = adev->pm.dpm.thermal.min_hotspot_temp; - else - temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; - - return snprintf(buf, PAGE_SIZE, "%d\n", temp); -} - -static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int hyst = to_sensor_dev_attr(attr)->index; - int temp; - - if (hyst) - temp = adev->pm.dpm.thermal.min_mem_temp; - else - temp = adev->pm.dpm.thermal.max_mem_crit_temp; - - return snprintf(buf, PAGE_SIZE, "%d\n", temp); -} - -static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - int channel = to_sensor_dev_attr(attr)->index; - - if (channel >= PP_TEMP_MAX) - return -EINVAL; - - return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); -} - -static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int channel = to_sensor_dev_attr(attr)->index; - int temp = 0; - - if (channel >= PP_TEMP_MAX) - return -EINVAL; - - switch (channel) { - case PP_TEMP_JUNCTION: - temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; - break; - case PP_TEMP_EDGE: - temp = adev->pm.dpm.thermal.max_edge_emergency_temp; - break; - case PP_TEMP_MEM: - temp = adev->pm.dpm.thermal.max_mem_emergency_temp; - break; - } - - return snprintf(buf, PAGE_SIZE, "%d\n", temp); -} - -static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - u32 pwm_mode = 0; - if (is_support_sw_smu(adev)) { - pwm_mode = smu_get_fan_control_mode(&adev->smu); - } else { - if 
(!adev->powerplay.pp_funcs->get_fan_control_mode) - return -EINVAL; - - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - } - - return sprintf(buf, "%i\n", pwm_mode); -} - -static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - int value; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - err = kstrtoint(buf, 10, &value); - if (err) - return err; - - if (is_support_sw_smu(adev)) { - smu_set_fan_control_mode(&adev->smu, value); - } else { - if (!adev->powerplay.pp_funcs->set_fan_control_mode) - return -EINVAL; - - amdgpu_dpm_set_fan_control_mode(adev, value); - } - - return count; -} - -static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%i\n", 0); -} - -static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%i\n", 255); -} - -static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 value; - u32 pwm_mode; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - if (is_support_sw_smu(adev)) - pwm_mode = smu_get_fan_control_mode(&adev->smu); - else - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - if (pwm_mode != AMD_FAN_CTRL_MANUAL) { - pr_info("manual fan speed control should be enabled first\n"); - return -EINVAL; - } - - err = kstrtou32(buf, 10, &value); - if (err) - return err; - - value = (value * 100) / 255; - - if (is_support_sw_smu(adev)) { - err = smu_set_fan_speed_percent(&adev->smu, value); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) { - err = amdgpu_dpm_set_fan_speed_percent(adev, value); - if (err) - return err; - } - - return count; -} - -static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 speed = 0; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - if (is_support_sw_smu(adev)) { - err = smu_get_fan_speed_percent(&adev->smu, &speed); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) { - err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); - if (err) - return err; - } - - speed = (speed * 255) / 100; - - return sprintf(buf, "%i\n", speed); -} - -static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 speed = 0; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - if (is_support_sw_smu(adev)) { - err = smu_get_fan_speed_rpm(&adev->smu, &speed); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { - err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - if (err) - return err; - } - - return sprintf(buf, "%i\n", speed); -} - -static ssize_t 
amdgpu_hwmon_get_fan1_min(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - u32 min_rpm = 0; - u32 size = sizeof(min_rpm); - int r; - - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, - (void *)&min_rpm, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); -} - -static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - u32 max_rpm = 0; - u32 size = sizeof(max_rpm); - int r; - - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, - (void *)&max_rpm, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); -} - -static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 rpm = 0; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - if (is_support_sw_smu(adev)) { - err = smu_get_fan_speed_rpm(&adev->smu, &rpm); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { - err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - if (err) - return err; - } - - return sprintf(buf, "%i\n", rpm); -} - -static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 value; - u32 pwm_mode; - - if (is_support_sw_smu(adev)) - pwm_mode = smu_get_fan_control_mode(&adev->smu); - else - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - - if (pwm_mode != AMD_FAN_CTRL_MANUAL) - return -ENODATA; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - err = kstrtou32(buf, 10, &value); - if (err) - return err; - - if (is_support_sw_smu(adev)) { - err = smu_set_fan_speed_rpm(&adev->smu, value); - if (err) - return err; - } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) { - err = amdgpu_dpm_set_fan_speed_rpm(adev, value); - if (err) - return err; - } - - return count; -} - -static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - u32 pwm_mode = 0; - - if (is_support_sw_smu(adev)) { - pwm_mode = smu_get_fan_control_mode(&adev->smu); - } else { - if (!adev->powerplay.pp_funcs->get_fan_control_mode) - return -EINVAL; - - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); - } - return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); -} - -static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - int value; - u32 pwm_mode; - - /* Can't adjust fan when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - - err = kstrtoint(buf, 10, &value); - if (err) - return err; - - if (value == 0) - pwm_mode = AMD_FAN_CTRL_AUTO; - else if (value == 1) - pwm_mode = AMD_FAN_CTRL_MANUAL; - else - return -EINVAL; - - if (is_support_sw_smu(adev)) { - smu_set_fan_control_mode(&adev->smu, pwm_mode); - } else { - if (!adev->powerplay.pp_funcs->set_fan_control_mode) - return -EINVAL; - amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); - } - - return count; -} - -static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - u32 vddgfx; - int r, size = sizeof(vddgfx); - - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - /* get the voltage */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, - (void *)&vddgfx, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); -} - -static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "vddgfx\n"); -} - -static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - u32 vddnb; - int r, size = sizeof(vddnb); - - /* only APUs have vddnb */ - if (!(adev->flags & AMD_IS_APU)) - return -EINVAL; - - /* Can't get voltage when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - /* get the voltage */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, - (void *)&vddnb, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); -} - -static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "vddnb\n"); -} - -static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - u32 query = 0; - int r, size = sizeof(u32); - unsigned uw; - - /* Can't get power when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - /* get the power */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, - (void *)&query, &size); - if (r) - return r; - - /* convert to microwatts */ - uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; - - return snprintf(buf, PAGE_SIZE, "%u\n", uw); -} - -static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%i\n", 0); -} - -static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - uint32_t limit = 0; - - if (is_support_sw_smu(adev)) { -
smu_get_power_limit(&adev->smu, &limit, true, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { - adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else { - return snprintf(buf, PAGE_SIZE, "\n"); - } -} - -static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - uint32_t limit = 0; - - if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, false, true); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { - adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); - } else { - return snprintf(buf, PAGE_SIZE, "\n"); - } -} - - -static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - int err; - u32 value; - - err = kstrtou32(buf, 10, &value); - if (err) - return err; - - value = value / 1000000; /* convert to watts */ - - if (is_support_sw_smu(adev)) { - err = smu_set_power_limit(&adev->smu, value); - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { - err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); - } else { - err = -EINVAL; - } - - if (err) - return err; - - return count; -} - -static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - uint32_t sclk; - int r, size = sizeof(sclk); - - /* Can't get clock frequency when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - /* get the sclk */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, - (void *)&sclk, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); -} - -static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "sclk\n"); -} - -static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct amdgpu_device *adev = dev_get_drvdata(dev); - struct drm_device *ddev = adev->ddev; - uint32_t mclk; - int r, size = sizeof(mclk); - - /* Can't get clock frequency when the card is off */ - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - - /* get the mclk */ - r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, - (void *)&mclk, &size); - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); -} - -static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "mclk\n"); -} - -/** - * DOC: hwmon - * - * The amdgpu driver exposes the following sensor interfaces: - * - * - GPU temperature (via the on-die sensor) - * - * - GPU voltage - * - * - Northbridge voltage (APUs only) - * - * - GPU power - * - * - GPU fan - * - * - GPU gfx/compute engine clock - * - * - GPU memory clock (dGPU only) - *
- * hwmon interfaces for GPU temperature: - * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius - * - temp2_input and temp3_input are supported on SOC15 dGPUs only - * - temp[1-3]_label: temperature channel label - * - temp2_label and temp3_label are supported on SOC15 dGPUs only - * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius - * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only - * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius - * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only - * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius - * - these are supported on SOC15 dGPUs only - * - * hwmon interfaces for GPU voltage: - * - * - in0_input: the voltage on the GPU in millivolts - * - * - in1_input: the voltage on the Northbridge in millivolts - * - * hwmon interfaces for GPU power: - * - * - power1_average: average power used by the GPU in microwatts - * - * - power1_cap_min: minimum cap supported in microwatts - * - * - power1_cap_max: maximum cap supported in microwatts - * - * - power1_cap: selected power cap in microwatts - * - * hwmon interfaces for GPU fan: - * - * - pwm1: pulse width modulation fan level (0-255) - * - * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) - * - * - pwm1_min: pulse width modulation fan control minimum level (0) - * - * - pwm1_max: pulse width modulation fan control maximum level (255) - * - * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM) - * - * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM) - * - * - fan1_input: fan speed in RPM - * - * - fan[1-\*]_target: desired fan speed. Unit: revolutions/min (RPM) - * - * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable - * - * hwmon interfaces for GPU clocks: - * - * - freq1_input: the gfx/compute clock in hertz - * - * - freq2_input: the memory clock in hertz - * - * You can use hwmon tools like sensors to view this information on your system.
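 *
 * To make the units above concrete, a minimal userspace reader is sketched
 * below. It is illustrative only and assumes the amdgpu device registered
 * as hwmon0; portable code should walk /sys/class/hwmon and match the
 * "name" attribute ("amdgpu") instead. temp1_input is in millidegrees
 * Celsius and power1_average in microwatts, as documented above:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           long temp_mdeg, power_uw;
 *           FILE *t = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
 *           FILE *p = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
 *
 *           if (t && fscanf(t, "%ld", &temp_mdeg) == 1)
 *                   printf("edge temp: %.1f C\n", temp_mdeg / 1000.0);
 *           if (p && fscanf(p, "%ld", &power_uw) == 1)
 *                   printf("avg power: %.2f W\n", power_uw / 1e6);
 *           if (t)
 *                   fclose(t);
 *           if (p)
 *                   fclose(p);
 *           return 0;
 *   }
 *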
- * - */ - -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); -static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); -static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); -static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); -static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); -static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); -static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); -static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); -static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); -static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); -static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); -static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); -static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); -static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); -static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); -static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); -static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); -static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); -static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); -static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); -static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); -static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); -static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); -static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); -static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); -static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); -static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); -static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); -static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); -static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); -static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); -static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); -static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); -static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); - -static struct attribute *hwmon_attributes[] = { - 
&sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_crit.dev_attr.attr, - &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp3_input.dev_attr.attr, - &sensor_dev_attr_temp3_crit.dev_attr.attr, - &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_emergency.dev_attr.attr, - &sensor_dev_attr_temp2_emergency.dev_attr.attr, - &sensor_dev_attr_temp3_emergency.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - &sensor_dev_attr_temp3_label.dev_attr.attr, - &sensor_dev_attr_pwm1.dev_attr.attr, - &sensor_dev_attr_pwm1_enable.dev_attr.attr, - &sensor_dev_attr_pwm1_min.dev_attr.attr, - &sensor_dev_attr_pwm1_max.dev_attr.attr, - &sensor_dev_attr_fan1_input.dev_attr.attr, - &sensor_dev_attr_fan1_min.dev_attr.attr, - &sensor_dev_attr_fan1_max.dev_attr.attr, - &sensor_dev_attr_fan1_target.dev_attr.attr, - &sensor_dev_attr_fan1_enable.dev_attr.attr, - &sensor_dev_attr_in0_input.dev_attr.attr, - &sensor_dev_attr_in0_label.dev_attr.attr, - &sensor_dev_attr_in1_input.dev_attr.attr, - &sensor_dev_attr_in1_label.dev_attr.attr, - &sensor_dev_attr_power1_average.dev_attr.attr, - &sensor_dev_attr_power1_cap_max.dev_attr.attr, - &sensor_dev_attr_power1_cap_min.dev_attr.attr, - &sensor_dev_attr_power1_cap.dev_attr.attr, - &sensor_dev_attr_freq1_input.dev_attr.attr, - &sensor_dev_attr_freq1_label.dev_attr.attr, - &sensor_dev_attr_freq2_input.dev_attr.attr, - &sensor_dev_attr_freq2_label.dev_attr.attr, - NULL -}; - -static umode_t hwmon_attributes_visible(struct kobject *kobj, - struct attribute *attr, int index) -{ - struct device *dev = kobj_to_dev(kobj); - struct amdgpu_device *adev = dev_get_drvdata(dev); - umode_t effective_mode = attr->mode; - - /* Skip fan attributes if fan is not present */ - if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_input.dev_attr.attr || - attr == &sensor_dev_attr_fan1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_target.dev_attr.attr || - attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) - return 0; - - /* Skip fan attributes on APU */ - if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_pwm1.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_input.dev_attr.attr || - attr == &sensor_dev_attr_fan1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_target.dev_attr.attr || - attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) - return 0; - - /* Skip limit attributes if DPM is not enabled */ - if (!adev->pm.dpm_enabled && - (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || - attr == &sensor_dev_attr_pwm1.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_input.dev_attr.attr || - attr == 
&sensor_dev_attr_fan1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_target.dev_attr.attr || - attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) - return 0; - - if (!is_support_sw_smu(adev)) { - /* mask fan attributes if we have no bindings for this asic to expose */ - if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && - attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ - (!adev->powerplay.pp_funcs->get_fan_control_mode && - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ - effective_mode &= ~S_IRUGO; - - if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && - attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ - (!adev->powerplay.pp_funcs->set_fan_control_mode && - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ - effective_mode &= ~S_IWUSR; - } - - if (((adev->flags & AMD_IS_APU) || - adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ - adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ - (attr == &sensor_dev_attr_power1_average.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| - attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) - return 0; - - if (!is_support_sw_smu(adev)) { - /* hide max/min values if we can't both query and manage the fan */ - if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && - !adev->powerplay.pp_funcs->get_fan_speed_percent) && - (!adev->powerplay.pp_funcs->set_fan_speed_rpm && - !adev->powerplay.pp_funcs->get_fan_speed_rpm) && - (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) - return 0; - - if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && - !adev->powerplay.pp_funcs->get_fan_speed_rpm) && - (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) - return 0; - } - - if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ - adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ - (attr == &sensor_dev_attr_in0_input.dev_attr.attr || - attr == &sensor_dev_attr_in0_label.dev_attr.attr)) - return 0; - - /* only APUs have vddnb */ - if (!(adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_in1_input.dev_attr.attr || - attr == &sensor_dev_attr_in1_label.dev_attr.attr)) - return 0; - - /* no mclk on APUs */ - if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || - attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) - return 0; - - /* only SOC15 dGPUs support hotspot and mem temperatures */ - if (((adev->flags & AMD_IS_APU) || - adev->asic_type < CHIP_VEGA10) && - (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || - attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || - attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || - attr == &sensor_dev_attr_temp2_input.dev_attr.attr || - attr == &sensor_dev_attr_temp3_input.dev_attr.attr || - attr == &sensor_dev_attr_temp2_label.dev_attr.attr || - attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) - return 0; - - return effective_mode; -} - -static const struct attribute_group hwmon_attrgroup = { - .attrs = hwmon_attributes, - .is_visible = 
hwmon_attributes_visible, -}; - -static const struct attribute_group *hwmon_groups[] = { - &hwmon_attrgroup, - NULL -}; - -void amdgpu_dpm_thermal_work_handler(struct work_struct *work) -{ - struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, - pm.dpm.thermal.work); - /* switch to the thermal state */ - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; - int temp, size = sizeof(temp); - - if (!adev->pm.dpm_enabled) - return; - - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, - (void *)&temp, &size)) { - if (temp < adev->pm.dpm.thermal.min_temp) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } else { - if (adev->pm.dpm.thermal.high_to_low) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } - mutex_lock(&adev->pm.mutex); - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) - adev->pm.dpm.thermal_active = true; - else - adev->pm.dpm.thermal_active = false; - adev->pm.dpm.state = dpm_state; - mutex_unlock(&adev->pm.mutex); - - amdgpu_pm_compute_clocks(adev); -} - -static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, - enum amd_pm_state_type dpm_state) -{ - int i; - struct amdgpu_ps *ps; - u32 ui_class; - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? - true : false; - - /* check if the vblank period is too short to adjust the mclk */ - if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { - if (amdgpu_dpm_vblank_too_short(adev)) - single_display = false; - } - - /* certain older asics have a separate 3D performance state, - * so try that first if the user selected performance - */ - if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) - dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; - /* balanced states don't exist at the moment */ - if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - -restart_search: - /* Pick the best power state based on current conditions */ - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - ps = &adev->pm.dpm.ps[i]; - ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; - switch (dpm_state) { - /* user states */ - case POWER_STATE_TYPE_BATTERY: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_BALANCED: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_PERFORMANCE: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - /* internal states */ - case POWER_STATE_TYPE_INTERNAL_UVD: - if (adev->pm.dpm.uvd_ps) - return adev->pm.dpm.uvd_ps; - else - break; - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_BOOT: - return adev->pm.dpm.boot_ps; - case POWER_STATE_TYPE_INTERNAL_THERMAL: - if (ps->class &
ATOM_PPLIB_CLASSIFICATION_THERMAL) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ACPI: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ULV: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_3DPERF: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - return ps; - break; - default: - break; - } - } - /* use a fallback state if we didn't match */ - switch (dpm_state) { - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (adev->pm.dpm.uvd_ps) { - return adev->pm.dpm.uvd_ps; - } else { - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - } - case POWER_STATE_TYPE_INTERNAL_THERMAL: - dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_ACPI: - dpm_state = POWER_STATE_TYPE_BATTERY; - goto restart_search; - case POWER_STATE_TYPE_BATTERY: - case POWER_STATE_TYPE_BALANCED: - case POWER_STATE_TYPE_INTERNAL_3DPERF: - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - default: - break; - } - - return NULL; -} - -static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) -{ - struct amdgpu_ps *ps; - enum amd_pm_state_type dpm_state; - int ret; - bool equal = false; - - /* if dpm init failed */ - if (!adev->pm.dpm_enabled) - return; - - if (adev->pm.dpm.user_state != adev->pm.dpm.state) { - /* add other state override checks here */ - if ((!adev->pm.dpm.thermal_active) && - (!adev->pm.dpm.uvd_active)) - adev->pm.dpm.state = adev->pm.dpm.user_state; - } - dpm_state = adev->pm.dpm.state; - - ps = amdgpu_dpm_pick_power_state(adev, dpm_state); - if (ps) - adev->pm.dpm.requested_ps = ps; - else - return; - - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { - printk("switching from power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); - } - - /* update whether vce is active */ - ps->vce_active = adev->pm.dpm.vce_active; - if (adev->powerplay.pp_funcs->display_configuration_changed) - amdgpu_dpm_display_configuration_changed(adev); - - ret = amdgpu_dpm_pre_set_power_state(adev); - if (ret) - return; - - if (adev->powerplay.pp_funcs->check_state_equal) { - if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) - equal = false; - } - - if (equal) - return; - - amdgpu_dpm_set_power_state(adev); - amdgpu_dpm_post_set_power_state(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - - if (adev->powerplay.pp_funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) { - enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; - /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); - /* save the user's level */ - adev->pm.dpm.forced_level = level; - } else { - /* otherwise, user selected level */ - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); - } - } -} - -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) -{ - int ret = 0; - if (is_support_sw_smu(adev)) { - ret = 
smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable); - if (ret) - DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n", - enable ? "true" : "false", ret); - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { - /* enable/disable UVD */ - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); - mutex_unlock(&adev->pm.mutex); - } - /* enable/disable Low Memory PState for UVD (4k videos) */ - if (adev->asic_type == CHIP_STONEY && - adev->uvd.decode_image_width >= WIDTH_4K) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - - if (hwmgr && hwmgr->hwmgr_func && - hwmgr->hwmgr_func->update_nbdpm_pstate) - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, - !enable, - true); - } -} - -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) -{ - int ret = 0; - if (is_support_sw_smu(adev)) { - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable); - if (ret) - DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n", - enable ? "true" : "false", ret); - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { - /* enable/disable VCE */ - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); - mutex_unlock(&adev->pm.mutex); - } -} - -void amdgpu_pm_print_power_states(struct amdgpu_device *adev) -{ - int i; - - if (adev->powerplay.pp_funcs->print_power_state == NULL) - return; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); - -} - -int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) -{ - int ret = 0; - - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return ret; - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) { - DRM_ERROR("failed to create device file for dpm state\n"); - return ret; - } - - return ret; -} - -void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) -{ - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return; - - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); -} - -int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) -{ - int r; - - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { - r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); - if (r) { - pr_err("smu firmware loading failed\n"); - return r; - } - *smu_version = adev->pm.fw_version; - } - return 0; -} - -int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) -{ - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret; - - if (adev->pm.sysfs_initialized) - return 0; - - if (adev->pm.dpm_enabled == 0) - return 0; - - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, - DRIVER_NAME, adev, - hwmon_groups); - if (IS_ERR(adev->pm.int_hwmon_dev)) { - ret = PTR_ERR(adev->pm.int_hwmon_dev); - dev_err(adev->dev, - "Unable to register hwmon device: %d\n", ret); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); - if (ret) { - DRM_ERROR("failed 
to create device file for dpm state\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) { - DRM_ERROR("failed to create device file for dpm force performance level\n"); - return ret; - } - - - ret = device_create_file(adev->dev, &dev_attr_pp_num_states); - if (ret) { - DRM_ERROR("failed to create device file pp_num_states\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); - if (ret) { - DRM_ERROR("failed to create device file pp_cur_state\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_force_state); - if (ret) { - DRM_ERROR("failed to create device file pp_force_state\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_table); - if (ret) { - DRM_ERROR("failed to create device file pp_table\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - - /* Arcturus does not support standalone mclk/socclk/fclk level setting */ - if (adev->asic_type == CHIP_ARCTURUS) { - dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; - dev_attr_pp_dpm_mclk.store = NULL; - - dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; - dev_attr_pp_dpm_socclk.store = NULL; - - dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; - dev_attr_pp_dpm_fclk.store = NULL; - } - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - if (adev->asic_type >= CHIP_VEGA10) { - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_socclk\n"); - return ret; - } - if (adev->asic_type != CHIP_ARCTURUS) { - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); - return ret; - } - } - } - if (adev->asic_type >= CHIP_VEGA20) { - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_fclk\n"); - return ret; - } - } - if (adev->asic_type != CHIP_ARCTURUS) { - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_pcie\n"); - return ret; - } - } - ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); - if (ret) { - DRM_ERROR("failed to create device file pp_sclk_od\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); - if (ret) { - DRM_ERROR("failed to create device file pp_mclk_od\n"); - return ret; - } - ret = device_create_file(adev->dev, - &dev_attr_pp_power_profile_mode); - if (ret) { - DRM_ERROR("failed to create device file " - "pp_power_profile_mode\n"); - return ret; - } - if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) { - ret = device_create_file(adev->dev, - &dev_attr_pp_od_clk_voltage); - if (ret) { - DRM_ERROR("failed to create device file " - "pp_od_clk_voltage\n"); - return ret; - } - } - ret = device_create_file(adev->dev, - &dev_attr_gpu_busy_percent); - if (ret) { - DRM_ERROR("failed to create device file " - "gpu_busy_percent\n"); - return ret; - } - /* APU does not have its own dedicated memory */ - if (!(adev->flags & AMD_IS_APU) && - (adev->asic_type != CHIP_VEGA10)) { - ret = device_create_file(adev->dev, - &dev_attr_mem_busy_percent); - if (ret) { - DRM_ERROR("failed to create device file " - "mem_busy_percent\n"); -
return ret; - } - } - /* PCIe Perf counters won't work on APU nodes */ - if (!(adev->flags & AMD_IS_APU)) { - ret = device_create_file(adev->dev, &dev_attr_pcie_bw); - if (ret) { - DRM_ERROR("failed to create device file pcie_bw\n"); - return ret; - } - } - if (adev->unique_id) - ret = device_create_file(adev->dev, &dev_attr_unique_id); - if (ret) { - DRM_ERROR("failed to create device file unique_id\n"); - return ret; - } - ret = amdgpu_debugfs_pm_init(adev); - if (ret) { - DRM_ERROR("Failed to register debugfs file for dpm!\n"); - return ret; - } - - if ((adev->asic_type >= CHIP_VEGA10) && - !(adev->flags & AMD_IS_APU)) { - ret = device_create_file(adev->dev, - &dev_attr_pp_features); - if (ret) { - DRM_ERROR("failed to create device file " - "pp_features\n"); - return ret; - } - } - - adev->pm.sysfs_initialized = true; - - return 0; -} - -void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) -{ - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - - if (adev->pm.dpm_enabled == 0) - return; - - if (adev->pm.int_hwmon_dev) - hwmon_device_unregister(adev->pm.int_hwmon_dev); - device_remove_file(adev->dev, &dev_attr_power_dpm_state); - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - - device_remove_file(adev->dev, &dev_attr_pp_num_states); - device_remove_file(adev->dev, &dev_attr_pp_cur_state); - device_remove_file(adev->dev, &dev_attr_pp_force_state); - device_remove_file(adev->dev, &dev_attr_pp_table); - - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (adev->asic_type >= CHIP_VEGA10) { - device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); - if (adev->asic_type != CHIP_ARCTURUS) - device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); - } - if (adev->asic_type != CHIP_ARCTURUS) - device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (adev->asic_type >= CHIP_VEGA20) - device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); - device_remove_file(adev->dev, &dev_attr_pp_sclk_od); - device_remove_file(adev->dev, &dev_attr_pp_mclk_od); - device_remove_file(adev->dev, - &dev_attr_pp_power_profile_mode); - if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) - device_remove_file(adev->dev, - &dev_attr_pp_od_clk_voltage); - device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); - if (!(adev->flags & AMD_IS_APU) && - (adev->asic_type != CHIP_VEGA10)) - device_remove_file(adev->dev, &dev_attr_mem_busy_percent); - if (!(adev->flags & AMD_IS_APU)) - device_remove_file(adev->dev, &dev_attr_pcie_bw); - if (adev->unique_id) - device_remove_file(adev->dev, &dev_attr_unique_id); - if ((adev->asic_type >= CHIP_VEGA10) && - !(adev->flags & AMD_IS_APU)) - device_remove_file(adev->dev, &dev_attr_pp_features); -} - -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) -{ - int i = 0; - - if (!adev->pm.dpm_enabled) - return; - - if (adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } - - if (is_support_sw_smu(adev)) { - struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; - smu_handle_task(&adev->smu, - smu_dpm->dpm_level, - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, - true); - } else { - if (adev->powerplay.pp_funcs->dispatch_tasks) { - if (!amdgpu_device_has_dc_support(adev)) { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - 
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - if (adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); - mutex_unlock(&adev->pm.mutex); - } - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); - } else { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - amdgpu_dpm_change_power_state_locked(adev); - mutex_unlock(&adev->pm.mutex); - } - } -} - -/* - * Debugfs info - */ -#if defined(CONFIG_DEBUG_FS) - -static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) -{ - uint32_t value; - uint64_t value64; - uint32_t query = 0; - int size; - - /* GPU Clocks */ - size = sizeof(value); - seq_printf(m, "GFX Clocks and Power:\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (MCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (SCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) - seq_printf(m, "\t%u mV (VDDGFX)\n", value); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) - seq_printf(m, "\t%u mV (VDDNB)\n", value); - size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); - size = sizeof(value); - seq_printf(m, "\n"); - - /* GPU Temp */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) - seq_printf(m, "GPU Temperature: %u C\n", value/1000); - - /* GPU Load */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) - seq_printf(m, "GPU Load: %u %%\n", value); - /* MEM Load */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) - seq_printf(m, "MEM Load: %u %%\n", value); - - seq_printf(m, "\n"); - - /* SMC feature mask */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) - seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - - if (adev->asic_type > CHIP_VEGA20) { - /* VCN clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCN: Disabled\n"); - } else { - seq_printf(m, "VCN: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); - } - } - seq_printf(m, "\n"); - } else { - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, 
AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); - } - } - seq_printf(m, "\n"); - - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); - } - } - } - - return 0; -} - -static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) -{ - int i; - - for (i = 0; clocks[i].flag; i++) - seq_printf(m, "\t%s: %s\n", clocks[i].name, - (flags & clocks[i].flag) ? "On" : "Off"); -} - -static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct drm_device *ddev = adev->ddev; - u32 flags = 0; - - amdgpu_device_ip_get_clockgating_state(adev, &flags); - seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); - amdgpu_parse_cg_state(m, flags); - seq_printf(m, "\n"); - - if (!adev->pm.dpm_enabled) { - seq_printf(m, "dpm not enabled\n"); - return 0; - } - if ((adev->flags & AMD_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { - seq_printf(m, "PX asic powered off\n"); - } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { - mutex_lock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) - adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); - else - seq_printf(m, "Debugfs support not implemented for this asic\n"); - mutex_unlock(&adev->pm.mutex); - } else { - return amdgpu_debugfs_pm_info_pp(m, adev); - } - - return 0; -} - -static const struct drm_info_list amdgpu_pm_info_list[] = { - {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, -}; -#endif - -static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); -#else - return 0; -#endif -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 0e6dba9f60f0..82e9ecf84352 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -19,17 +19,29 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
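As a bridge from the hwmon/debugfs code above to the PMU rework below, a worked decode of the AMDGPU_PP_SENSOR_GPU_POWER word that both amdgpu_hwmon_show_power_avg() and amdgpu_debugfs_pm_info_pp() consume: as implemented, the upper 24 bits carry integer watts and the low byte a sub-watt remainder that the hwmon path scales by 1000 to reach microwatts, while the debugfs path prints the same byte after a decimal point. The packing is inferred from those two call sites, not from a documented register layout:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t query = (42u << 8) | 0x80;	/* 42 W plus fractional byte 0x80 (128) */
		unsigned int uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

		printf("debugfs format: %u.%u W\n", query >> 8, query & 0xff);	/* "42.128 W" */
		printf("hwmon format:   %u\n", uw);				/* 42128000 uW */
		return 0;
	}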
* - * Author: Jonathan Kim <jonathan.kim@amd.com> - * */ #include <linux/perf_event.h> #include <linux/init.h> #include "amdgpu.h" #include "amdgpu_pmu.h" -#include "df_v3_6.h" #define PMU_NAME_SIZE 32 +#define NUM_FORMATS_AMDGPU_PMU 4 +#define NUM_FORMATS_DF_VEGA20 3 +#define NUM_EVENTS_DF_VEGA20 8 +#define NUM_EVENT_TYPES_VEGA20 1 +#define NUM_EVENTS_VEGA20_XGMI 2 +#define NUM_EVENTS_VEGA20_MAX NUM_EVENTS_VEGA20_XGMI +#define NUM_EVENT_TYPES_ARCTURUS 1 +#define NUM_EVENTS_ARCTURUS_XGMI 6 +#define NUM_EVENTS_ARCTURUS_MAX NUM_EVENTS_ARCTURUS_XGMI + +struct amdgpu_pmu_event_attribute { + struct device_attribute attr; + const char *event_str; + unsigned int type; +}; /* record to keep track of pmu entry per pmu type per device */ struct amdgpu_pmu_entry { @@ -37,11 +49,162 @@ struct amdgpu_pmu_entry { struct amdgpu_device *adev; struct pmu pmu; unsigned int pmu_perf_type; + char *pmu_type_name; + char *pmu_file_prefix; + struct attribute_group fmt_attr_group; + struct amdgpu_pmu_event_attribute *fmt_attr; + struct attribute_group evt_attr_group; + struct amdgpu_pmu_event_attribute *evt_attr; }; +static ssize_t amdgpu_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr; + + amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute, + attr); + + if (!amdgpu_pmu_attr->type) + return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str); + + return sprintf(buf, "%s,type=0x%x\n", + amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type); +} + static LIST_HEAD(amdgpu_pmu_list); +struct amdgpu_pmu_attr { + const char *name; + const char *config; +}; + +struct amdgpu_pmu_type { + const unsigned int type; + const unsigned int num_of_type; +}; + +struct amdgpu_pmu_config { + struct amdgpu_pmu_attr *formats; + unsigned int num_formats; + struct amdgpu_pmu_attr *events; + unsigned int num_events; + struct amdgpu_pmu_type *types; + unsigned int num_types; +}; + +/* + * Events fall under two categories: + * - PMU typed + * Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have + * performance counter operations handled by one IP <pmu_type>. Formats and + * events should be defined by <pmu_type>_<asic_type>_formats and + * <pmu_type>_<asic_type>_events respectively. + * + * - Event config typed + * Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance + * counter operations that can be handled by multiple IPs dictated by their + * "type" format field. Formats and events should be defined by + * amdgpu_pmu_formats and <asic_type>_events respectively. Format field + * "type" is generated in amdgpu_pmu_event_show and defined in + * <asic_type>_event_config_types. 
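The second category above is what the per-device amdgpu_<dev_num> PMU exposes. A hypothetical userspace consumer of one of these config-typed events via perf_event_open(2) could look like the sketch below; the PMU's dynamic type and the full event string (including the type=0x... suffix emitted by amdgpu_pmu_event_show()) must be read from sysfs at runtime, so both constants marked as placeholders are assumptions, not values guaranteed by the driver:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		/* placeholder: read /sys/bus/event_source/devices/amdgpu_0/type */
		attr.type = 10;
		/* event=0x7, instance=0x46, umask=0x2 in config:0-7/8-15/16-23 and a
		 * placeholder XGMI type of 0x1 in config:56-63, mirroring the format
		 * specs defined in amdgpu_pmu_formats below */
		attr.config = 0x7ULL | (0x46ULL << 8) | (0x2ULL << 16) | (0x1ULL << 56);

		/* uncore-style PMU: no task context, so pid = -1 and a single CPU */
		long fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);

		return fd < 0;
	}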
+ */ + +static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = { + { .name = "event", .config = "config:0-7" }, + { .name = "instance", .config = "config:8-15" }, + { .name = "umask", .config = "config:16-23"}, + { .name = "type", .config = "config:56-63"} +}; + +/* Vega20 events */ +static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = { + { .name = "xgmi_link0_data_outbound", + .config = "event=0x7,instance=0x46,umask=0x2" }, + { .name = "xgmi_link1_data_outbound", + .config = "event=0x7,instance=0x47,umask=0x2" } +}; + +static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = { + { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI, + .num_of_type = NUM_EVENTS_VEGA20_XGMI } +}; + +static struct amdgpu_pmu_config vega20_config = { + .formats = amdgpu_pmu_formats, + .num_formats = ARRAY_SIZE(amdgpu_pmu_formats), + .events = vega20_events, + .num_events = ARRAY_SIZE(vega20_events), + .types = vega20_types, + .num_types = ARRAY_SIZE(vega20_types) +}; + +/* Vega20 data fabric (DF) events */ +static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = { + { .name = "event", .config = "config:0-7" }, + { .name = "instance", .config = "config:8-15" }, + { .name = "umask", .config = "config:16-23"} +}; + +static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = { + { .name = "cake0_pcsout_txdata", + .config = "event=0x7,instance=0x46,umask=0x2" }, + { .name = "cake1_pcsout_txdata", + .config = "event=0x7,instance=0x47,umask=0x2" }, + { .name = "cake0_pcsout_txmeta", + .config = "event=0x7,instance=0x46,umask=0x4" }, + { .name = "cake1_pcsout_txmeta", + .config = "event=0x7,instance=0x47,umask=0x4" }, + { .name = "cake0_ftiinstat_reqalloc", + .config = "event=0xb,instance=0x46,umask=0x4" }, + { .name = "cake1_ftiinstat_reqalloc", + .config = "event=0xb,instance=0x47,umask=0x4" }, + { .name = "cake0_ftiinstat_rspalloc", + .config = "event=0xb,instance=0x46,umask=0x8" }, + { .name = "cake1_ftiinstat_rspalloc", + .config = "event=0xb,instance=0x47,umask=0x8" } +}; + +static struct amdgpu_pmu_config df_vega20_config = { + .formats = df_vega20_formats, + .num_formats = ARRAY_SIZE(df_vega20_formats), + .events = df_vega20_events, + .num_events = ARRAY_SIZE(df_vega20_events), + .types = NULL, + .num_types = 0 +}; + +/* Arcturus events */ +static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = { + { .name = "xgmi_link0_data_outbound", + .config = "event=0x7,instance=0x4b,umask=0x2" }, + { .name = "xgmi_link1_data_outbound", + .config = "event=0x7,instance=0x4c,umask=0x2" }, + { .name = "xgmi_link2_data_outbound", + .config = "event=0x7,instance=0x4d,umask=0x2" }, + { .name = "xgmi_link3_data_outbound", + .config = "event=0x7,instance=0x4e,umask=0x2" }, + { .name = "xgmi_link4_data_outbound", + .config = "event=0x7,instance=0x4f,umask=0x2" }, + { .name = "xgmi_link5_data_outbound", + .config = "event=0x7,instance=0x50,umask=0x2" } +}; + +static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = { + { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI, + .num_of_type = NUM_EVENTS_ARCTURUS_XGMI } +}; + +static struct amdgpu_pmu_config arcturus_config = { + .formats = amdgpu_pmu_formats, + .num_formats = ARRAY_SIZE(amdgpu_pmu_formats), + .events = arcturus_events, + .num_events = ARRAY_SIZE(arcturus_events), + .types = arcturus_types, + .num_types = ARRAY_SIZE(arcturus_types) +}; + /* initialize perf counter */ static int amdgpu_perf_event_init(struct perf_event *event) { @@ -52,7 +215,8 @@ static int 
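The counter handling that follows reads hardware counters without locking: amdgpu_perf_read() re-samples the counter until prev_count can be advanced with a compare-and-swap, then publishes the delta. A userspace analogue of that pattern, with C11 atomics standing in for the kernel's local64_* helpers (a sketch, not driver code):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t prev_count;
	static _Atomic uint64_t event_count;

	static void demo_read(uint64_t (*read_hw)(void))
	{
		uint64_t prev, count;

		do {
			prev = atomic_load(&prev_count);
			count = read_hw();
		} while (!atomic_compare_exchange_weak(&prev_count, &prev, count));

		/* publish the delta only once the swap has succeeded */
		atomic_fetch_add(&event_count, count - prev);
	}

	static uint64_t fake_hw(void) { return 42; }

	int main(void)
	{
		demo_read(fake_hw);
		return atomic_load(&event_count) != 42;
	}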
amdgpu_perf_event_init(struct perf_event *event) return -ENOENT; /* update the hw_perf_event struct with config data */ - hwc->conf = event->attr.config; + hwc->config = event->attr.config; + hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE; return 0; } @@ -64,6 +228,7 @@ static void amdgpu_perf_start(struct perf_event *event, int flags) struct amdgpu_pmu_entry *pe = container_of(event->pmu, struct amdgpu_pmu_entry, pmu); + int target_cntr = 0; if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; @@ -71,19 +236,27 @@ static void amdgpu_perf_start(struct perf_event *event, int flags) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; - switch (pe->pmu_perf_type) { - case PERF_TYPE_AMDGPU_DF: - if (!(flags & PERF_EF_RELOAD)) - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); + switch (hwc->config_base) { + case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: + case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: + if (!(flags & PERF_EF_RELOAD)) { + target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, + hwc->config, 0 /* unused */, + 1 /* add counter */); + if (target_cntr < 0) + break; + + hwc->idx = target_cntr; + } - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0); + pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, + hwc->idx, 0); break; default: break; } perf_event_update_userpage(event); - } /* read perf counter */ @@ -93,21 +266,21 @@ static void amdgpu_perf_read(struct perf_event *event) struct amdgpu_pmu_entry *pe = container_of(event->pmu, struct amdgpu_pmu_entry, pmu); - u64 count, prev; do { prev = local64_read(&hwc->prev_count); - switch (pe->pmu_perf_type) { - case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf, - &count); + switch (hwc->config_base) { + case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: + case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: + pe->adev->df.funcs->pmc_get_count(pe->adev, + hwc->config, hwc->idx, &count); break; default: count = 0; break; - }; + } } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev); local64_add(count - prev, &event->count); @@ -124,13 +297,15 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags) if (hwc->state & PERF_HES_UPTODATE) return; - switch (pe->pmu_perf_type) { - case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0); + switch (hwc->config_base) { + case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: + case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, + 0); break; default: break; - }; + } WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; @@ -142,25 +317,43 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags) hwc->state |= PERF_HES_UPTODATE; } -/* add perf counter */ +/* add perf counter */ static int amdgpu_perf_add(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; - int retval; - + int retval = 0, target_cntr; struct amdgpu_pmu_entry *pe = container_of(event->pmu, struct amdgpu_pmu_entry, pmu); + switch (pe->pmu_perf_type) { + case AMDGPU_PMU_PERF_TYPE_DF: + hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF; + break; + case AMDGPU_PMU_PERF_TYPE_ALL: + hwc->config_base = (hwc->config >> + AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) & + AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK; + break; + } + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - switch (pe->pmu_perf_type) { - case PERF_TYPE_AMDGPU_DF: - retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); + switch (hwc->config_base) { + case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: + case 
AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: + target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, + hwc->config, 0 /* unused */, + 1 /* add counter */); + if (target_cntr < 0) + retval = target_cntr; + else + hwc->idx = target_cntr; + break; default: return 0; - }; + } if (retval) return retval; @@ -169,7 +362,6 @@ static int amdgpu_perf_add(struct perf_event *event, int flags) amdgpu_perf_start(event, PERF_EF_RELOAD); return retval; - } /* delete perf counter */ @@ -182,36 +374,105 @@ static void amdgpu_perf_del(struct perf_event *event, int flags) amdgpu_perf_stop(event, PERF_EF_UPDATE); - switch (pe->pmu_perf_type) { - case PERF_TYPE_AMDGPU_DF: - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1); + switch (hwc->config_base) { + case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: + case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, + 1); break; default: break; - }; + } perf_event_update_userpage(event); } -/* vega20 pmus */ +static void amdgpu_pmu_create_event_attrs_by_type( + struct attribute_group *attr_group, + struct amdgpu_pmu_event_attribute *pmu_attr, + struct amdgpu_pmu_attr events[], + int s_offset, + int e_offset, + unsigned int type) +{ + int i; + + pmu_attr += s_offset; + + for (i = s_offset; i < e_offset; i++) { + attr_group->attrs[i] = &pmu_attr->attr.attr; + sysfs_attr_init(&pmu_attr->attr.attr); + pmu_attr->attr.attr.name = events[i].name; + pmu_attr->attr.attr.mode = 0444; + pmu_attr->attr.show = amdgpu_pmu_event_show; + pmu_attr->event_str = events[i].config; + pmu_attr->type = type; + pmu_attr++; + } +} -/* init pmu tracking per pmu type */ -static int init_pmu_by_type(struct amdgpu_device *adev, - const struct attribute_group *attr_groups[], - char *pmu_type_name, char *pmu_file_prefix, - unsigned int pmu_perf_type, - unsigned int num_counters) +static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group, + struct amdgpu_pmu_event_attribute *pmu_attr, + struct amdgpu_pmu_attr events[], + int num_events) { - char pmu_name[PMU_NAME_SIZE]; - struct amdgpu_pmu_entry *pmu_entry; - int ret = 0; + amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0, + num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE); +} - pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL); - if (!pmu_entry) +static int amdgpu_pmu_alloc_pmu_attrs( + struct attribute_group *fmt_attr_group, + struct amdgpu_pmu_event_attribute **fmt_attr, + struct attribute_group *evt_attr_group, + struct amdgpu_pmu_event_attribute **evt_attr, + struct amdgpu_pmu_config *config) +{ + *fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr), + GFP_KERNEL); + + if (!(*fmt_attr)) return -ENOMEM; - pmu_entry->adev = adev; + fmt_attr_group->attrs = kcalloc(config->num_formats + 1, + sizeof(*fmt_attr_group->attrs), GFP_KERNEL); + + if (!fmt_attr_group->attrs) + goto err_fmt_attr_grp; + + *evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL); + + if (!(*evt_attr)) + goto err_evt_attr; + + evt_attr_group->attrs = kcalloc(config->num_events + 1, + sizeof(*evt_attr_group->attrs), GFP_KERNEL); + + if (!evt_attr_group->attrs) + goto err_evt_attr_grp; + + return 0; +err_evt_attr_grp: + kfree(*evt_attr); +err_evt_attr: + kfree(fmt_attr_group->attrs); +err_fmt_attr_grp: + kfree(*fmt_attr); + return -ENOMEM; +} + +/* init pmu tracking per pmu type */ +static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry, + struct amdgpu_pmu_config *config) +{ + const struct attribute_group *attr_groups[] = { + &pmu_entry->fmt_attr_group, + 
&pmu_entry->evt_attr_group, + NULL + }; + char pmu_name[PMU_NAME_SIZE]; + int ret = 0, total_num_events = 0; + pmu_entry->pmu = (struct pmu){ .event_init = amdgpu_perf_event_init, .add = amdgpu_perf_add, @@ -222,59 +483,180 @@ static int init_pmu_by_type(struct amdgpu_device *adev, .task_ctx_nr = perf_invalid_context, }; - pmu_entry->pmu.attr_groups = attr_groups; - pmu_entry->pmu_perf_type = pmu_perf_type; - snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", - pmu_file_prefix, adev->ddev->primary->index); + ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group, + &pmu_entry->fmt_attr, + &pmu_entry->evt_attr_group, + &pmu_entry->evt_attr, + config); + + if (ret) + goto err_out; + + amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr, + config->formats, config->num_formats); + + if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) { + int i; + + for (i = 0; i < config->num_types; i++) { + amdgpu_pmu_create_event_attrs_by_type( + &pmu_entry->evt_attr_group, + pmu_entry->evt_attr, + config->events, + total_num_events, + total_num_events + + config->types[i].num_of_type, + config->types[i].type); + total_num_events += config->types[i].num_of_type; + } + } else { + amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group, + pmu_entry->evt_attr, + config->events, config->num_events); + total_num_events = config->num_events; + } - ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1); + pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), + GFP_KERNEL); - if (ret) { - kfree(pmu_entry); - pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name); - return ret; + if (!pmu_entry->pmu.attr_groups) { + ret = -ENOMEM; + goto err_attr_group; } - pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n", - pmu_type_name, num_counters); + snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix, + adev_to_drm(pmu_entry->adev)->primary->index); + + ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1); + + if (ret) + goto err_register; + + if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL) + pr_info("Detected AMDGPU %s Counters. 
# of Counters = %d.\n", + pmu_entry->pmu_type_name, total_num_events); + else + pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events); + list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list); return 0; +err_register: + kfree(pmu_entry->pmu.attr_groups); +err_attr_group: + kfree(pmu_entry->fmt_attr_group.attrs); + kfree(pmu_entry->fmt_attr); + kfree(pmu_entry->evt_attr_group.attrs); + kfree(pmu_entry->evt_attr); +err_out: + pr_warn("Error initializing AMDGPU %s PMUs.\n", + pmu_entry->pmu_type_name); + return ret; +} + +/* destroy all pmu data associated with target device */ +void amdgpu_pmu_fini(struct amdgpu_device *adev) +{ + struct amdgpu_pmu_entry *pe, *temp; + + list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) { + if (pe->adev != adev) + continue; + list_del(&pe->entry); + perf_pmu_unregister(&pe->pmu); + kfree(pe->pmu.attr_groups); + kfree(pe->fmt_attr_group.attrs); + kfree(pe->fmt_attr); + kfree(pe->evt_attr_group.attrs); + kfree(pe->evt_attr); + kfree(pe); + } +} + +static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev, + unsigned int pmu_type, + char *pmu_type_name, + char *pmu_file_prefix) +{ + struct amdgpu_pmu_entry *pmu_entry; + + pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL); + + if (!pmu_entry) + return pmu_entry; + + pmu_entry->adev = adev; + pmu_entry->fmt_attr_group.name = "format"; + pmu_entry->fmt_attr_group.attrs = NULL; + pmu_entry->evt_attr_group.name = "events"; + pmu_entry->evt_attr_group.attrs = NULL; + pmu_entry->pmu_perf_type = pmu_type; + pmu_entry->pmu_type_name = pmu_type_name; + pmu_entry->pmu_file_prefix = pmu_file_prefix; + + return pmu_entry; } /* init amdgpu_pmu */ int amdgpu_pmu_init(struct amdgpu_device *adev) { int ret = 0; + struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df; switch (adev->asic_type) { case CHIP_VEGA20: - /* init df */ - ret = init_pmu_by_type(adev, df_v3_6_attr_groups, - "DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF, - DF_V3_6_MAX_COUNTERS); + pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF, + "DF", "amdgpu_df"); - /* other pmu types go here*/ - break; - default: - return 0; - } + if (!pmu_entry_df) + return -ENOMEM; - return 0; -} + ret = init_pmu_entry_by_type_and_add(pmu_entry_df, + &df_vega20_config); + + if (ret) { + kfree(pmu_entry_df); + return ret; + } + pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL, + "", "amdgpu"); -/* destroy all pmu data associated with target device */ -void amdgpu_pmu_fini(struct amdgpu_device *adev) -{ - struct amdgpu_pmu_entry *pe, *temp; + if (!pmu_entry) { + amdgpu_pmu_fini(adev); + return -ENOMEM; + } - list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) { - if (pe->adev == adev) { - list_del(&pe->entry); - perf_pmu_unregister(&pe->pmu); - kfree(pe); + ret = init_pmu_entry_by_type_and_add(pmu_entry, + &vega20_config); + + if (ret) { + kfree(pmu_entry); + amdgpu_pmu_fini(adev); + return ret; } + + break; + case CHIP_ARCTURUS: + pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL, + "", "amdgpu"); + if (!pmu_entry) + return -ENOMEM; + + ret = init_pmu_entry_by_type_and_add(pmu_entry, + &arcturus_config); + + if (ret) { + kfree(pmu_entry); + return -ENOMEM; + } + + break; + + default: + return 0; } + + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h index 7dddb7160a11..6882dc48c5d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h @@ -19,18 +19,38 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE 
SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Author: Jonathan Kim <jonathan.kim@amd.com> - * */ #ifndef _AMDGPU_PMU_H_ #define _AMDGPU_PMU_H_ +/* PMU types. */ enum amdgpu_pmu_perf_type { - PERF_TYPE_AMDGPU_DF = 0, - PERF_TYPE_AMDGPU_MAX + AMDGPU_PMU_PERF_TYPE_NONE = 0, + AMDGPU_PMU_PERF_TYPE_DF, + AMDGPU_PMU_PERF_TYPE_ALL }; +/* + * PMU type AMDGPU_PMU_PERF_TYPE_ALL can hold events of different "type" + * configurations. Event config types are parsed from the 64-bit raw + * config (see AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT and + * AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK) and are registered into the HW perf + * events config_base. + * + * PMU types with only a single event configuration type + * (non-AMDGPU_PMU_PERF_TYPE_ALL) have their event config type auto-generated + * when the performance counter is added. + */ +enum amdgpu_pmu_event_config_type { + AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE = 0, + AMDGPU_PMU_EVENT_CONFIG_TYPE_DF, + AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI, + AMDGPU_PMU_EVENT_CONFIG_TYPE_MAX +}; + +#define AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT 56 +#define AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK 0xff + int amdgpu_pmu_init(struct amdgpu_device *adev); void amdgpu_pmu_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c new file mode 100644 index 000000000000..d02c8637f909 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2016-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König, Felix Kuehling + */ + +#include "amdgpu.h" + +static inline struct amdgpu_preempt_mgr * +to_preempt_mgr(struct ttm_resource_manager *man) +{ + return container_of(man, struct amdgpu_preempt_mgr, manager); +} + +/** + * DOC: mem_info_preempt_used + * + * The amdgpu driver provides a sysfs API for reporting the current total + * amount of used preemptible memory. + * The file mem_info_preempt_used is used for this, and returns the current + * used size of the preemptible block, in bytes.
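+ * + * A rough in-kernel sketch of the same query the attribute handler below performs (every symbol used here is defined in this file or by TTM): + * + *	man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT); + *	used_bytes = amdgpu_preempt_mgr_usage(man);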
+ */ +static ssize_t mem_info_preempt_used_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT); + return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man)); +} + +static DEVICE_ATTR_RO(mem_info_preempt_used); + +/** + * amdgpu_preempt_mgr_new - allocate a new node + * + * @man: TTM memory type manager + * @tbo: TTM BO we need this range for + * @place: placement flags and restrictions + * @res: the resulting resource object + * + * Dummy allocator: just counts the space used, without real backing and + * without any limit. + */ +static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_resource **res) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + *res = kzalloc(sizeof(**res), GFP_KERNEL); + if (!*res) + return -ENOMEM; + + ttm_resource_init(tbo, place, *res); + (*res)->start = AMDGPU_BO_INVALID_OFFSET; + + atomic64_add((*res)->num_pages, &mgr->used); + return 0; +} + +/** + * amdgpu_preempt_mgr_del - free ranges + * + * @man: TTM memory type manager + * @res: TTM resource object + * + * Free the counted space again. + */ +static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man, + struct ttm_resource *res) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + atomic64_sub(res->num_pages, &mgr->used); + kfree(res); +} + +/** + * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain + * + * @man: TTM memory type manager + * + * Return how many bytes are currently used in the PREEMPT domain. + */ +uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + s64 result = atomic64_read(&mgr->used); + + return (result > 0 ? result : 0) * PAGE_SIZE; +} + +/** + * amdgpu_preempt_mgr_debug - dump usage of the PREEMPT domain + * + * @man: TTM memory type manager + * @printer: DRM printer to use + * + * Dump the usage counter through the DRM printer. + */ +static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n", + man->size, (s64)atomic64_read(&mgr->used)); +} + +static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = { + .alloc = amdgpu_preempt_mgr_new, + .free = amdgpu_preempt_mgr_del, + .debug = amdgpu_preempt_mgr_debug +}; + +/** + * amdgpu_preempt_mgr_init - init PREEMPT manager + * + * @adev: amdgpu_device pointer + * + * Allocate and initialize the PREEMPT manager.
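+ * + * Note that the manager is registered with a nominal size of 1 << 30 pages and never allocates real backing store; the reported usage is just the page counter scaled to bytes, roughly: + * + *	used_bytes = atomic64_read(&mgr->used) * PAGE_SIZE;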
+ */ +int amdgpu_preempt_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + man->use_tt = true; + man->func = &amdgpu_preempt_mgr_func; + + ttm_resource_manager_init(man, (1 << 30)); + + atomic64_set(&mgr->used, 0); + + ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used); + if (ret) { + DRM_ERROR("Failed to create device file mem_info_preempt_used\n"); + return ret; + } + + ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, + &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_preempt_mgr_fini - free and destroy the PREEMPT manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the PREEMPT manager. All remaining allocations are + * evicted first; if that fails, teardown is aborted. + */ +void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e1b8d8daeafc..c641f84649d6 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -24,58 +24,239 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_psp.h" #include "amdgpu_ucode.h" +#include "amdgpu_xgmi.h" #include "soc15_common.h" #include "psp_v3_1.h" #include "psp_v10_0.h" #include "psp_v11_0.h" +#include "psp_v11_0_8.h" #include "psp_v12_0.h" +#include "psp_v13_0.h" #include "amdgpu_ras.h" +#include "amdgpu_securedisplay.h" +#include "amdgpu_atomfirmware.h" -static void psp_set_funcs(struct amdgpu_device *adev); +static int psp_sysfs_init(struct amdgpu_device *adev); +static void psp_sysfs_fini(struct amdgpu_device *adev); + +static int psp_load_smu_fw(struct psp_context *psp); +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context); +static int psp_ta_load(struct psp_context *psp, struct ta_context *context); +static int psp_rap_terminate(struct psp_context *psp); +static int psp_securedisplay_terminate(struct psp_context *psp); + +/* + * Because DF Cstate management is centralized in the PMFW, the firmware + * loading sequence is updated as below: + * - Load KDB + * - Load SYS_DRV + * - Load tOS + * - Load PMFW + * - Setup TMR + * - Load other non-psp fw + * - Load ASD + * - Load XGMI/RAS/HDCP/DTM TA if any + * + * This new sequence is required for + * - Arcturus and onwards + */ +static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + psp->pmfw_centralized_cstate_management = false; + return; + } + + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(13, 0, 2): + psp->pmfw_centralized_cstate_management = true; + break; + default: + psp->pmfw_centralized_cstate_management = false; + break; + } +} static
int psp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - psp_set_funcs(adev); - - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): psp_v3_1_set_psp_funcs(psp); psp->autoload_supported = false; break; - case CHIP_RAVEN: + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): psp_v10_0_set_psp_funcs(psp); psp->autoload_supported = false; break; - case CHIP_VEGA20: - case CHIP_ARCTURUS: + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 4): psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = false; break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = true; break; - case CHIP_RENOIR: + case IP_VERSION(11, 0, 3): + case IP_VERSION(12, 0, 1): psp_v12_0_set_psp_funcs(psp); break; + case IP_VERSION(13, 0, 2): + psp_v13_0_set_psp_funcs(psp); + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): + psp_v13_0_set_psp_funcs(psp); + psp->autoload_supported = true; + break; + case IP_VERSION(11, 0, 8): + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { + psp_v11_0_8_set_psp_funcs(psp); + psp->autoload_supported = false; + } + break; default: return -EINVAL; } psp->adev = adev; + psp_check_pmfw_centralized_cstate_management(psp); + + return 0; +} + +static void psp_memory_training_fini(struct psp_context *psp) +{ + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + kfree(ctx->sys_cache); + ctx->sys_cache = NULL; +} + +static int psp_memory_training_init(struct psp_context *psp) +{ + int ret; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; + } + + ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); + if (ctx->sys_cache == NULL) { + DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); + ret = -ENOMEM; + goto Err_out; + } + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; return 0; + +Err_out: + psp_memory_training_fini(psp); + return ret; +} + +/* + * Helper function to query a psp runtime database entry + * + * @adev: amdgpu_device pointer + * @entry_type: the type of psp runtime database entry + * @db_entry: runtime database entry pointer + * + * Return false if the runtime database doesn't exist or the entry is invalid, + * or true if the specific database entry is found and copied to @db_entry + */ +static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, + enum psp_runtime_entry_type entry_type, + void *db_entry) +{ + uint64_t db_header_pos, db_dir_pos; + struct psp_runtime_data_header db_header = {0}; + struct psp_runtime_data_directory db_dir = {0}; + bool ret = false; + int i; + + db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; + db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); + + /* read runtime db header from vram */ + amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, + sizeof(struct
psp_runtime_data_header), false); + + if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { + /* runtime db doesn't exist, exit */ + dev_warn(adev->dev, "PSP runtime database doesn't exist\n"); + return false; + } + + /* read runtime database entry from vram */ + amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, + sizeof(struct psp_runtime_data_directory), false); + + if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { + /* invalid db entry count, exit */ + dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); + return false; + } + + /* look up for requested entry type */ + for (i = 0; i < db_dir.entry_count && !ret; i++) { + if (db_dir.entry_list[i].entry_type == entry_type) { + switch (entry_type) { + case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: + if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { + /* invalid db entry size */ + dev_warn(adev->dev, "Invalid PSP runtime database entry size\n"); + return false; + } + /* read runtime database entry */ + amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, + (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); + ret = true; + break; + default: + ret = false; + break; + } + } + } + + return ret; } static int psp_sw_init(void *handle) @@ -83,22 +264,71 @@ static int psp_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; int ret; + struct psp_runtime_boot_cfg_entry boot_cfg_entry; + struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; - ret = psp_init_microcode(psp); - if (ret) { - DRM_ERROR("Failed to load psp firmware!\n"); - return ret; + psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!psp->cmd) { + DRM_ERROR("Failed to allocate memory for command buffer!\n"); + return -ENOMEM; } - ret = psp_mem_training_init(psp); - if (ret) { - DRM_ERROR("Failed to initialize memory training!\n"); - return ret; + if (!amdgpu_sriov_vf(adev)) { + ret = psp_init_microcode(psp); + if (ret) { + DRM_ERROR("Failed to load psp firmware!\n"); + return ret; + } + } else if (amdgpu_sriov_vf(adev) && + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) { + ret = psp_init_ta_microcode(psp, "aldebaran"); + if (ret) { + DRM_ERROR("Failed to initialize ta microcode!\n"); + return ret; + } } - ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); - if (ret) { - DRM_ERROR("Failed to process memory training!\n"); - return ret; + + memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); + if (psp_get_runtime_db_entry(adev, + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, + &boot_cfg_entry)) { + psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; + if ((psp->boot_cfg_bitmask) & + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { + /* If the psp runtime database exists, then + * only enable two-stage memory training + * when the TWO_STAGE_DRAM_TRAINING bit is set + * in the runtime database */ + mem_training_ctx->enable_mem_training = true; + } + + } else { + /* If the psp runtime database doesn't exist or + * is invalid, force enable two-stage memory + * training */ + mem_training_ctx->enable_mem_training = true; + } + + if (mem_training_ctx->enable_mem_training) { + ret = psp_memory_training_init(psp); + if (ret) { + DRM_ERROR("Failed to initialize memory training!\n"); + return ret; + } + + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + } + + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11,
0, 0) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) { + ret = psp_sysfs_init(adev); + if (ret) + return ret; } return 0; @@ -107,16 +337,30 @@ static int psp_sw_init(void *handle) static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct psp_context *psp = &adev->psp; + struct psp_gfx_cmd_resp *cmd = psp->cmd; - psp_mem_training_fini(&adev->psp); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; - if (adev->psp.ta_fw) { - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; + psp_memory_training_fini(psp); + if (psp->sos_fw) { + release_firmware(psp->sos_fw); + psp->sos_fw = NULL; + } + if (psp->asd_fw) { + release_firmware(psp->asd_fw); + psp->asd_fw = NULL; } + if (psp->ta_fw) { + release_firmware(psp->ta_fw); + psp->ta_fw = NULL; + } + + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) + psp_sysfs_fini(adev); + + kfree(cmd); + cmd = NULL; + return 0; } @@ -127,6 +371,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, int i; struct amdgpu_device *adev = psp->adev; + if (psp->adev->no_hw_access) + return 0; + for (i = 0; i < adev->usec_timeout; i++) { val = RREG32(reg_index); if (check_changed) { @@ -142,30 +389,73 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, return -ETIME; } +static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) +{ + switch (cmd_id) { + case GFX_CMD_ID_LOAD_TA: + return "LOAD_TA"; + case GFX_CMD_ID_UNLOAD_TA: + return "UNLOAD_TA"; + case GFX_CMD_ID_INVOKE_CMD: + return "INVOKE_CMD"; + case GFX_CMD_ID_LOAD_ASD: + return "LOAD_ASD"; + case GFX_CMD_ID_SETUP_TMR: + return "SETUP_TMR"; + case GFX_CMD_ID_LOAD_IP_FW: + return "LOAD_IP_FW"; + case GFX_CMD_ID_DESTROY_TMR: + return "DESTROY_TMR"; + case GFX_CMD_ID_SAVE_RESTORE: + return "SAVE_RESTORE_IP_FW"; + case GFX_CMD_ID_SETUP_VMR: + return "SETUP_VMR"; + case GFX_CMD_ID_DESTROY_VMR: + return "DESTROY_VMR"; + case GFX_CMD_ID_PROG_REG: + return "PROG_REG"; + case GFX_CMD_ID_GET_FW_ATTESTATION: + return "GET_FW_ATTESTATION"; + case GFX_CMD_ID_LOAD_TOC: + return "ID_LOAD_TOC"; + case GFX_CMD_ID_AUTOLOAD_RLC: + return "AUTOLOAD_RLC"; + case GFX_CMD_ID_BOOT_CFG: + return "BOOT_CFG"; + default: + return "UNKNOWN CMD"; + } +} + static int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) { int ret; - int index; - int timeout = 2000; + int index, idx; + int timeout = 20000; + bool ras_intr = false; + bool skip_unsupport = false; - mutex_lock(&psp->mutex); + if (psp->adev->no_hw_access) + return 0; + + if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) + return 0; memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); index = atomic_inc_return(&psp->fence_value); - ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); + ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); if (ret) { atomic_dec(&psp->fence_value); - mutex_unlock(&psp->mutex); - return ret; + goto exit; } - amdgpu_asic_invalidate_hdp(psp->adev, NULL); + amdgpu_device_invalidate_hdp(psp->adev, NULL); while (*((unsigned int *)psp->fence_buf) != index) { if (--timeout == 0) break; @@ -174,12 +464,19 @@ psp_cmd_submit_buf(struct psp_context *psp, * because the gpu reset thread has triggered and the lock resource should * be released for the
psp resume sequence. */ - if (amdgpu_ras_intr_triggered()) + ras_intr = amdgpu_ras_intr_triggered(); + if (ras_intr) break; - msleep(1); - amdgpu_asic_invalidate_hdp(psp->adev, NULL); + usleep_range(10, 100); + amdgpu_device_invalidate_hdp(psp->adev, NULL); } + /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ + skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || + psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); + + memcpy((void *)&cmd->resp, (void *)&psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); + /* In some cases, psp response status is not 0 even when there is no * problem while the command is submitted. Some version of PSP FW * doesn't write 0 to that field. @@ -187,42 +484,63 @@ psp_cmd_submit_buf(struct psp_context *psp, * during psp initialization to avoid breaking hw_init and it doesn't * return -EINVAL. */ - if (psp->cmd_buf_mem->resp.status || !timeout) { + if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode) - DRM_WARN("failed to load ucode id (%d) ", - ucode->ucode_id); - DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", - psp->cmd_buf_mem->cmd_id, - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); + DRM_WARN("failed to load ucode %s(0x%X) ", + amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); + DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", + psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status); if (!timeout) { - mutex_unlock(&psp->mutex); - return -EINVAL; + ret = -EINVAL; + goto exit; } } - /* get xGMI session id from response buffer */ - cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; - if (ucode) { ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; } - mutex_unlock(&psp->mutex); +exit: + drm_dev_exit(idx); return ret; } +static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) +{ + struct psp_gfx_cmd_resp *cmd = psp->cmd; + + mutex_lock(&psp->mutex); + + memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); + + return cmd; +} + +void release_psp_cmd_buf(struct psp_context *psp) +{ + mutex_unlock(&psp->mutex); +} + static void psp_prep_tmr_cmd_buf(struct psp_context *psp, struct psp_gfx_cmd_resp *cmd, - uint64_t tmr_mc, uint32_t size) + uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) { - if (psp_support_vmr_ring(psp)) + struct amdgpu_device *adev = psp->adev; + uint32_t size = amdgpu_bo_size(tmr_bo); + uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); + + if (amdgpu_sriov_vf(psp->adev)) cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; else cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); cmd->cmd.cmd_setup_tmr.buf_size = size; + cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; + cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); + cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); } static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, @@ -239,22 +557,20 @@ static int psp_load_toc(struct psp_context *psp, uint32_t *tmr_size) { int ret; - struct psp_gfx_cmd_resp *cmd; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; /* Copy toc to psp firmware private buffer */ -
memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); + psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); - psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); + psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) *tmr_size = psp->cmd_buf_mem->resp.tmr_size; - kfree(cmd); + + release_psp_cmd_buf(psp); + return ret; } @@ -273,13 +589,13 @@ static int psp_tmr_init(struct psp_context *psp) * Note: this memory needs to be reserved until the driver * uninitializes. */ - tmr_size = PSP_TMR_SIZE; + tmr_size = PSP_TMR_SIZE(psp->adev); /* For ASICs that support RLC autoload, psp will parse the toc * and calculate the total size of TMR needed */ if (!amdgpu_sriov_vf(psp->adev) && - psp->toc_start_addr && - psp->toc_bin_size && + psp->toc.start_addr && + psp->toc.size_bytes && psp->fw_pri_buf) { ret = psp_load_toc(psp, &tmr_size); if (ret) { @@ -289,92 +605,263 @@ static int psp_tmr_init(struct psp_context *psp) } pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; - ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, + ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev), AMDGPU_GEM_DOMAIN_VRAM, &psp->tmr_bo, &psp->tmr_mc_addr, pptr); return ret; } +static bool psp_skip_tmr(struct psp_context *psp) +{ + switch (psp->adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(13, 0, 2): + return true; + default: + return false; + } +} + static int psp_tmr_load(struct psp_context *psp) { int ret; struct psp_gfx_cmd_resp *cmd; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + /* For Navi12, Sienna Cichlid, and Aldebaran SRIOV, do not set up TMR; + * it is already set up by the host driver. + */ + if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); - psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, - amdgpu_bo_size(psp->tmr_bo)); + psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - kfree(cmd); + release_psp_cmd_buf(psp); + + return ret; +} + +static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, + struct psp_gfx_cmd_resp *cmd) +{ + if (amdgpu_sriov_vf(psp->adev)) + cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; + else + cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; +} + +static int psp_tmr_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + psp_prep_tmr_unload_cmd_buf(psp, cmd); + DRM_INFO("free PSP TMR buffer\n"); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); return ret; } -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t asd_mc, uint64_t asd_mc_shared, - uint32_t size, uint32_t shared_size) +static int psp_tmr_terminate(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_len = size; + int ret; + void *tmr_buf; + void **pptr; + + ret = psp_tmr_unload(psp); + if (ret) + return ret; + + /* free TMR memory buffer */ + pptr = amdgpu_sriov_vf(psp->adev) ?
&tmr_buf : NULL; + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; + return 0; } -static int psp_asd_init(struct psp_context *psp) +int psp_get_fw_attestation_records_addr(struct psp_context *psp, + uint64_t *output_ptr) { int ret; + struct psp_gfx_cmd_resp *cmd; - /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for shared ASD <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->asd_shared_bo, - &psp->asd_shared_mc_addr, - &psp->asd_shared_buf); + if (!output_ptr) + return -EINVAL; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + if (!ret) { + *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + + ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); + } + + release_psp_cmd_buf(psp); return ret; } -static int psp_asd_load(struct psp_context *psp) +static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) +{ + struct psp_context *psp = &adev->psp; + struct psp_gfx_cmd_resp *cmd; + int ret; + + if (amdgpu_sriov_vf(adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; + cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!ret) { + *boot_cfg = + (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0; + } + + release_psp_cmd_buf(psp); + + return ret; +} + +static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) { int ret; + struct psp_context *psp = &adev->psp; struct psp_gfx_cmd_resp *cmd; + if (amdgpu_sriov_vf(adev)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; + cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; + cmd->cmd.boot_cfg.boot_config = boot_cfg; + cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); + + return ret; +} + +static int psp_rl_load(struct amdgpu_device *adev) +{ + int ret; + struct psp_context *psp = &adev->psp; + struct psp_gfx_cmd_resp *cmd; + + if (!is_psp_fw_valid(psp->rl)) + return 0; + + cmd = acquire_psp_cmd_buf(psp); + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); + + cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; + cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); + + return ret; +} + +static int psp_asd_load(struct psp_context *psp) +{ + return psp_ta_load(psp, &psp->asd_context); +} + +static int psp_asd_initialize(struct psp_context *psp) +{ + int ret; + /* If the PSP version doesn't match the ASD version, ASD loading will fail; * add a workaround to bypass it for SRIOV for now.
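* (In practice this initialization is a no-op both under SRIOV and when no ASD binary is present, i.e. when asd_context.bin_desc.size_bytes is zero; see the check below.)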
* TODO: add version check to make it common */ - if (amdgpu_sriov_vf(psp->adev)) + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + psp->asd_context.mem_context.shared_mc_addr = 0; + psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; + psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); + ret = psp_asd_load(psp); + if (!ret) + psp->asd_context.initialized = true; - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr, - psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); + return ret; +} - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); +static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = session_id; +} - kfree(cmd); +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context) +{ + int ret; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + psp_prep_ta_unload_cmd_buf(cmd, context->session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); + + return ret; +} + +static int psp_asd_unload(struct psp_context *psp) +{ + return psp_ta_unload(psp, &psp->asd_context); +} + +static int psp_asd_terminate(struct psp_context *psp) +{ + int ret; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->asd_context.initialized) + return 0; + + ret = psp_asd_unload(psp); + + if (!ret) + psp->asd_context.initialized = false; return ret; } @@ -390,189 +877,177 @@ static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, uint32_t value) { - struct psp_gfx_cmd_resp *cmd = NULL; + struct psp_gfx_cmd_resp *cmd; int ret = 0; if (reg >= PSP_REG_LAST) return -EINVAL; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + cmd = acquire_psp_cmd_buf(psp); psp_prep_reg_prog_cmd_buf(cmd, reg, value); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + DRM_ERROR("PSP failed to program reg id %d", reg); + + release_psp_cmd_buf(psp); - kfree(cmd); return ret; } -static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared, - uint32_t xgmi_ta_size, uint32_t shared_size) +static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t ta_bin_mc, + struct ta_context *context) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc); - cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size; + cmd->cmd_id = context->ta_load_type; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); + cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(context->mem_context.shared_mc_addr); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(context->mem_context.shared_mc_addr); + 
cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; } -static int psp_xgmi_init_shared_buf(struct psp_context *psp) +static int psp_ta_init_shared_buf(struct psp_context *psp, + struct ta_mem_context *mem_ctx) { int ret; /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for xgmi ta <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for the TA <-> driver exchange + */ + ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->xgmi_context.xgmi_shared_bo, - &psp->xgmi_context.xgmi_shared_mc_addr, - &psp->xgmi_context.xgmi_shared_buf); + &mem_ctx->shared_bo, + &mem_ctx->shared_mc_addr, + &mem_ctx->shared_buf); return ret; } -static int psp_xgmi_load(struct psp_context *psp) +static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) { - int ret; - struct psp_gfx_cmd_resp *cmd; + amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, + &mem_ctx->shared_buf); +} - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; +static int psp_xgmi_init_shared_buf(struct psp_context *psp) +{ + return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); +} - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; +static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; +} - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); +static int psp_ta_invoke(struct psp_context *psp, + uint32_t ta_cmd_id, + struct ta_context *context) +{ + int ret; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->xgmi_context.xgmi_shared_mc_addr, - psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE); + psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - if (!ret) { - psp->xgmi_context.initialized = 1; - psp->xgmi_context.session_id = cmd->resp.session_id; - } - - kfree(cmd); + release_psp_cmd_buf(psp); return ret; } -static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t xgmi_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id; -} - -static int psp_xgmi_unload(struct psp_context *psp) +static int psp_ta_load(struct psp_context *psp, struct ta_context *context) { int ret; struct psp_gfx_cmd_resp *cmd; - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; + cmd = acquire_psp_cmd_buf(psp); - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + psp_copy_fw(psp, context->bin_desc.start_addr, + context->bin_desc.size_bytes); - psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - kfree(cmd); + if (!ret) + context->session_id = cmd->resp.session_id; + + release_psp_cmd_buf(psp); return ret; } -static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, -
uint32_t xgmi_session_id) +static int psp_xgmi_load(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ + return psp_ta_load(psp, &psp->xgmi_context.context); } -int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +static int psp_xgmi_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->xgmi_context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - kfree(cmd); + return psp_ta_unload(psp, &psp->xgmi_context.context); +} - return ret; +int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); } -static int psp_xgmi_terminate(struct psp_context *psp) +int psp_xgmi_terminate(struct psp_context *psp) { int ret; + struct amdgpu_device *adev = psp->adev; - if (!psp->xgmi_context.initialized) + /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || + (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + adev->gmc.xgmi.connected_to_cpu)) + return 0; + + if (!psp->xgmi_context.context.initialized) return 0; ret = psp_xgmi_unload(psp); if (ret) return ret; - psp->xgmi_context.initialized = 0; + psp->xgmi_context.context.initialized = false; /* free xgmi shared memory */ - amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, - &psp->xgmi_context.xgmi_shared_mc_addr, - &psp->xgmi_context.xgmi_shared_buf); + psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); return 0; } -static int psp_xgmi_initialize(struct psp_context *psp) +int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; - if (!psp->adev->psp.ta_fw || - !psp->adev->psp.ta_xgmi_ucode_size || - !psp->adev->psp.ta_xgmi_start_addr) + if (!psp->ta_fw || + !psp->xgmi_context.context.bin_desc.size_bytes || + !psp->xgmi_context.context.bin_desc.start_addr) return -ENOENT; - if (!psp->xgmi_context.initialized) { + if (!load_ta) + goto invoke; + + psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; + psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + + if (!psp->xgmi_context.context.initialized) { ret = psp_xgmi_init_shared_buf(psp); if (ret) return ret; @@ -580,12 +1055,16 @@ static int psp_xgmi_initialize(struct psp_context *psp) /* Load XGMI TA */ ret = psp_xgmi_load(psp); - if (ret) + if (!ret) + psp->xgmi_context.context.initialized = true; + else return ret; +invoke: /* Initialize XGMI session */ - xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); + xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + xgmi_cmd->flag_extend_link_record = set_extended_data; xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); @@ -593,119 +1072,255 @@ static int psp_xgmi_initialize(struct psp_context *psp) return ret; } -// ras begin -static void psp_prep_ras_ta_load_cmd_buf(struct 
psp_gfx_cmd_resp *cmd, - uint64_t ras_ta_mc, uint64_t ras_mc_shared, - uint32_t ras_ta_size, uint32_t shared_size) +int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc); - cmd->cmd.cmd_load_ta.app_len = ras_ta_size; + struct ta_xgmi_shared_memory *xgmi_cmd; + int ret; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; + xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; + + /* Invoke xgmi ta to get hive id */ + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + if (ret) + return ret; + + *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; + + return 0; } -static int psp_ras_init_shared_buf(struct psp_context *psp) +int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) { + struct ta_xgmi_shared_memory *xgmi_cmd; int ret; - /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for ras ta <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->ras.ras_shared_bo, - &psp->ras.ras_shared_mc_addr, - &psp->ras.ras_shared_buf); + xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - return ret; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; + + /* Invoke xgmi ta to get the node id */ + ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); + if (ret) + return ret; + + *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; + + return 0; } -static int psp_ras_load(struct psp_context *psp) +static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; + return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b; +} - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; +/* + * Chips that support extended topology information require the driver to + * reflect topology information in the opposite direction. This is + * because the TA has already exceeded its link record limit and if the + * TA holds bi-directional information, the driver would have to do + * multiple fetches instead of just two. 
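+ * + * For example (illustrative values): if the local node A learns from its extended query that the A -> B link is 2 hops and 1 link, the driver copies those counts into B's cached view of A rather than fetching the B -> A direction from the TA again.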
+ */ +static void psp_xgmi_reflect_topology_info(struct psp_context *psp, + struct psp_xgmi_node_info node_info) +{ + struct amdgpu_device *mirror_adev; + struct amdgpu_hive_info *hive; + uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; + uint64_t dst_node_id = node_info.node_id; + uint8_t dst_num_hops = node_info.num_hops; + uint8_t dst_num_links = node_info.num_links; + + hive = amdgpu_get_xgmi_hive(psp->adev); + list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { + struct psp_xgmi_topology_info *mirror_top_info; + int j; + + if (mirror_adev->gmc.xgmi.node_id != dst_node_id) + continue; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; + for (j = 0; j < mirror_top_info->num_nodes; j++) { + if (mirror_top_info->nodes[j].node_id != src_node_id) + continue; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); + mirror_top_info->nodes[j].num_hops = dst_num_hops; + /* + * prevent 0 num_links value re-reflection since reflection + * criteria is based on num_hops (direct or indirect). + * + */ + if (dst_num_links) + mirror_top_info->nodes[j].num_links = dst_num_links; - psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->ras.ras_shared_mc_addr, - psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE); + break; + } - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); + break; + } +} - if (!ret) { - psp->ras.ras_initialized = 1; - psp->ras.session_id = cmd->resp.session_id; +int psp_xgmi_get_topology_info(struct psp_context *psp, + int number_devices, + struct psp_xgmi_topology_info *topology, + bool get_extended_data) +{ + struct ta_xgmi_shared_memory *xgmi_cmd; + struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; + struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; + int i; + int ret; + + if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) + return -EINVAL; + + xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); + xgmi_cmd->flag_extend_link_record = get_extended_data; + + /* Fill in the shared memory with topology information as input */ + topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; + topology_info_input->num_nodes = number_devices; + + for (i = 0; i < topology_info_input->num_nodes; i++) { + topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; + topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; + topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; + topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; } - kfree(cmd); + /* Invoke xgmi ta to get the topology information */ + ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); + if (ret) + return ret; - return ret; -} + /* Read the output topology information from the shared memory */ + topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; + topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; + for (i = 0; i < topology->num_nodes; i++) { + /* extended data will either be 0 or equal to non-extended data */ + if (topology_info_output->nodes[i].num_hops) + topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; + + /* non-extended data gets 
everything here so no need to update */ + if (!get_extended_data) { + topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; + topology->nodes[i].is_sharing_enabled = + topology_info_output->nodes[i].is_sharing_enabled; + topology->nodes[i].sdma_engine = + topology_info_output->nodes[i].sdma_engine; + } -static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ras_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = ras_session_id; + } + + /* Invoke xgmi ta again to get the link information */ + if (psp_xgmi_peer_link_info_supported(psp)) { + struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output; + + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; + + ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS); + + if (ret) + return ret; + + link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; + for (i = 0; i < topology->num_nodes; i++) { + /* accumulate num_links on extended data */ + topology->nodes[i].num_links = get_extended_data ? + topology->nodes[i].num_links + + link_info_output->nodes[i].num_links : + link_info_output->nodes[i].num_links; + + /* reflect the topology information for bi-directionality */ + if (psp->xgmi_context.supports_extended_data && + get_extended_data && topology->nodes[i].num_hops) + psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); + } + } + + return 0; } -static int psp_ras_unload(struct psp_context *psp) +int psp_xgmi_set_topology_info(struct psp_context *psp, + int number_devices, + struct psp_xgmi_topology_info *topology) { - int ret; - struct psp_gfx_cmd_resp *cmd; + struct ta_xgmi_shared_memory *xgmi_cmd; + struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; + int i; - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; + if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) + return -EINVAL; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; + memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id); + topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; + xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; + topology_info_input->num_nodes = number_devices; - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); + for (i = 0; i < topology_info_input->num_nodes; i++) { + topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; + topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; + topology_info_input->nodes[i].is_sharing_enabled = 1; + topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; + } - kfree(cmd); + /* Invoke xgmi ta to set topology information */ + return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); +} - return ret; +// ras begin +static int psp_ras_init_shared_buf(struct psp_context *psp) +{ + return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); +} + +static int psp_ras_load(struct psp_context *psp) +{ + return psp_ta_load(psp, &psp->ras_context.context); } -static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t ras_session_id) +static int psp_ras_unload(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id; - 
cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ + return psp_ta_unload(psp, &psp->ras_context.context); +} + +static void psp_ras_ta_check_status(struct psp_context *psp) +{ + struct ta_ras_shared_memory *ras_cmd = + (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; + + switch (ras_cmd->ras_status) { + case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: + dev_warn(psp->adev->dev, + "RAS WARNING: cmd failed due to unsupported ip\n"); + break; + case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: + dev_warn(psp->adev->dev, + "RAS WARNING: cmd failed due to unsupported error injection\n"); + break; + case TA_RAS_STATUS__SUCCESS: + break; + default: + dev_warn(psp->adev->dev, + "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + break; + } } int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { + struct ta_ras_shared_memory *ras_cmd; int ret; - struct psp_gfx_cmd_resp *cmd; + + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; /* * TODO: bypass the loading in sriov for now @@ -713,17 +1328,29 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); - psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->ras.session_id); + if (amdgpu_ras_intr_triggered()) + return ret; - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { + DRM_WARN("RAS: Unsupported Interface\n"); + return -EINVAL; + } - kfree(cmd); + if (!ret) { + if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { + dev_warn(psp->adev->dev, "ECC switch disabled\n"); + + ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; + } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) + dev_warn(psp->adev->dev, + "RAS internal register access blocked\n"); + + psp_ras_ta_check_status(psp); + } return ret; } @@ -734,10 +1361,10 @@ int psp_ras_enable_features(struct psp_context *psp, struct ta_ras_shared_memory *ras_cmd; int ret; - if (!psp->ras.ras_initialized) + if (!psp->ras_context.context.initialized) return -EINVAL; - ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); if (enable) @@ -751,7 +1378,7 @@ int psp_ras_enable_features(struct psp_context *psp, if (ret) return -EINVAL; - return ras_cmd->ras_status; + return 0; } static int psp_ras_terminate(struct psp_context *psp) @@ -764,19 +1391,17 @@ static int psp_ras_terminate(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->ras.ras_initialized) + if (!psp->ras_context.context.initialized) return 0; ret = psp_ras_unload(psp); if (ret) return ret; - psp->ras.ras_initialized = 0; + psp->ras_context.context.initialized = false; /* free ras shared memory */ - amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, - &psp->ras.ras_shared_mc_addr, - &psp->ras.ras_shared_buf); + psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); return 0; } @@ -784,199 +1409,278 @@ static int psp_ras_terminate(struct psp_context *psp) static int psp_ras_initialize(struct psp_context *psp) { int ret; + uint32_t boot_cfg = 0xFF; + struct amdgpu_device *adev = psp->adev; + struct
 
 	/*
 	 * TODO: bypass the initialize in sriov for now
 	 */
-	if (amdgpu_sriov_vf(psp->adev))
+	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (!psp->adev->psp.ta_ras_ucode_size ||
-	    !psp->adev->psp.ta_ras_start_addr) {
-		dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
+	    !adev->psp.ras_context.context.bin_desc.start_addr) {
+		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
 		return 0;
 	}
 
-	if (!psp->ras.ras_initialized) {
+	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
+		/* query GECC enablement status from boot config
+		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
+		 */
+		ret = psp_boot_config_get(adev, &boot_cfg);
+		if (ret)
+			dev_warn(adev->dev, "PSP get boot config failed\n");
+
+		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
+			if (!boot_cfg) {
+				dev_info(adev->dev, "GECC is disabled\n");
+			} else {
+				/* disable GECC in next boot cycle if ras is
+				 * disabled by module parameter amdgpu_ras_enable
+				 * and/or amdgpu_ras_mask, or if the
+				 * boot_config_get call failed
+				 */
+				ret = psp_boot_config_set(adev, 0);
+				if (ret)
+					dev_warn(adev->dev, "PSP set boot config failed\n");
+				else
+					dev_warn(adev->dev, "GECC will be disabled in next boot cycle "
						 "if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
+			}
+		} else {
+			if (boot_cfg == 1) {
+				dev_info(adev->dev, "GECC is enabled\n");
+			} else {
+				/* enable GECC in next boot cycle if it is disabled
+				 * in boot config, or force enable GECC if failed to
+				 * get boot configuration
+				 */
+				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+				if (ret)
+					dev_warn(adev->dev, "PSP set boot config failed\n");
+				else
+					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+			}
+		}
+	}
+
+	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
+	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+	if (!psp->ras_context.context.initialized) {
 		ret = psp_ras_init_shared_buf(psp);
 		if (ret)
 			return ret;
 	}
 
+	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+	if (amdgpu_ras_is_poison_mode_supported(adev))
+		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
+	if (!adev->gmc.xgmi.connected_to_cpu)
+		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+
 	ret = psp_ras_load(psp);
+
+	if (!ret && !ras_cmd->ras_status)
+		psp->ras_context.context.initialized = true;
+	else {
+		if (ras_cmd->ras_status)
+			dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+		amdgpu_ras_fini(psp->adev);
+	}
+
+	return ret;
+}
+
+int psp_ras_trigger_error(struct psp_context *psp,
+			  struct ta_ras_trigger_error_input *info)
+{
+	struct ta_ras_shared_memory *ras_cmd;
+	int ret;
+
+	if (!psp->ras_context.context.initialized)
+		return -EINVAL;
+
+	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+	ras_cmd->ras_in_message.trigger_error = *info;
+
+	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
 	if (ret)
-		return ret;
+		return -EINVAL;
+
+	/* If err_event_athub occurs, the error injection was successful;
+	 * however, the return status from the TA is no longer reliable.
+	 */
+	if (amdgpu_ras_intr_triggered())
+		return 0;
+
+	if (ras_cmd->ras_status)
+		return -EINVAL;
 
 	return 0;
 }
 // ras end
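 
All of the per-TA blocks that follow (HDCP, DTM, RAP, securedisplay) reduce to the same generic lifecycle the RAS code above uses: allocate the shared buffer once, load the TA binary, invoke commands against the session, and unload and free on termination. A minimal sketch of that sequence, using only the psp_ta_* helper signatures visible in this patch; example_ta_start itself is hypothetical, not a function in the driver:

static int example_ta_start(struct psp_context *psp, struct ta_context *ctx,
			    enum psp_shared_mem_size mem_size, uint32_t ta_cmd_id)
{
	int ret;

	ctx->mem_context.shared_mem_size = mem_size;
	ctx->ta_load_type = GFX_CMD_ID_LOAD_TA;

	/* one-time allocation of the TA <-> driver shared buffer */
	if (!ctx->initialized) {
		ret = psp_ta_init_shared_buf(psp, &ctx->mem_context);
		if (ret)
			return ret;
	}

	/* load the binary described by ctx->bin_desc, then talk to it */
	ret = psp_ta_load(psp, ctx);
	if (ret) {
		psp_ta_free_shared_buf(&ctx->mem_context);
		return ret;
	}
	ctx->initialized = true;

	ret = psp_ta_invoke(psp, ta_cmd_id, ctx);
	if (ret) {
		psp_ta_unload(psp, ctx);
		ctx->initialized = false;
		psp_ta_free_shared_buf(&ctx->mem_context);
	}

	return ret;
}

Each wrapper below then only has to pick its own ta_context and shared-memory size.

 // HDCP start
-static void 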
psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t hdcp_ta_mc, - uint64_t hdcp_mc_shared, - uint32_t hdcp_ta_size, - uint32_t shared_size) +static int psp_hdcp_init_shared_buf(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); - cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = - lower_32_bits(hdcp_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = - upper_32_bits(hdcp_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; + return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); } -static int psp_hdcp_init_shared_buf(struct psp_context *psp) +static int psp_hdcp_load(struct psp_context *psp) { - int ret; - - /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for hdcp ta <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->hdcp_context.hdcp_shared_bo, - &psp->hdcp_context.hdcp_shared_mc_addr, - &psp->hdcp_context.hdcp_shared_buf); - - return ret; + return psp_ta_load(psp, &psp->hdcp_context.context); } -static int psp_hdcp_load(struct psp_context *psp) +static int psp_hdcp_initialize(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; /* - * TODO: bypass the loading in sriov for now + * TODO: bypass the initialize in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, - psp->ta_hdcp_ucode_size); + if (!psp->hdcp_context.context.bin_desc.size_bytes || + !psp->hdcp_context.context.bin_desc.start_addr) { + dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); + return 0; + } - psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->hdcp_context.hdcp_shared_mc_addr, - psp->ta_hdcp_ucode_size, - PSP_HDCP_SHARED_MEM_SIZE); + psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; + psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!psp->hdcp_context.context.initialized) { + ret = psp_hdcp_init_shared_buf(psp); + if (ret) + return ret; + } + ret = psp_hdcp_load(psp); if (!ret) { - psp->hdcp_context.hdcp_initialized = 1; - psp->hdcp_context.session_id = cmd->resp.session_id; + psp->hdcp_context.context.initialized = true; + mutex_init(&psp->hdcp_context.mutex); } - kfree(cmd); - return ret; } -static int psp_hdcp_initialize(struct psp_context *psp) + +static int psp_hdcp_unload(struct psp_context *psp) { - int ret; + return psp_ta_unload(psp, &psp->hdcp_context.context); +} +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ /* - * TODO: bypass the initialize in sriov for now + * TODO: bypass the loading in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->adev->psp.ta_hdcp_ucode_size || - !psp->adev->psp.ta_hdcp_start_addr) { - dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); + return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); +} + +static int psp_hdcp_terminate(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) return 0; - } - if 
(!psp->hdcp_context.hdcp_initialized) { - ret = psp_hdcp_init_shared_buf(psp); - if (ret) - return ret; + if (!psp->hdcp_context.context.initialized) { + if (psp->hdcp_context.context.mem_context.shared_buf) + goto out; + else + return 0; } - ret = psp_hdcp_load(psp); + ret = psp_hdcp_unload(psp); if (ret) return ret; + psp->hdcp_context.context.initialized = false; + +out: + /* free hdcp shared memory */ + psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); + return 0; } -static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t hdcp_session_id) +// HDCP end + +// DTM start +static int psp_dtm_init_shared_buf(struct psp_context *psp) +{ + return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); +} + +static int psp_dtm_load(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; - cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; + return psp_ta_load(psp, &psp->dtm_context.context); } -static int psp_hdcp_unload(struct psp_context *psp) +static int psp_dtm_initialize(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; /* - * TODO: bypass the unloading in sriov for now + * TODO: bypass the initialize in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + if (!psp->dtm_context.context.bin_desc.size_bytes || + !psp->dtm_context.context.bin_desc.start_addr) { + dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); + return 0; + } - psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); + psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; + psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!psp->dtm_context.context.initialized) { + ret = psp_dtm_init_shared_buf(psp); + if (ret) + return ret; + } - kfree(cmd); + ret = psp_dtm_load(psp); + if (!ret) { + psp->dtm_context.context.initialized = true; + mutex_init(&psp->dtm_context.mutex); + } return ret; } -static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t hdcp_session_id) +static int psp_dtm_unload(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: cmd_invoke_cmd.buf is not used for now */ + return psp_ta_unload(psp, &psp->dtm_context.context); } -int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - int ret; - struct psp_gfx_cmd_resp *cmd; - /* * TODO: bypass the loading in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->hdcp_context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - kfree(cmd); - - return ret; + return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); } -static int psp_hdcp_terminate(struct psp_context *psp) +static int psp_dtm_terminate(struct psp_context *psp) { int ret; @@ -986,96 +1690,162 @@ static int psp_hdcp_terminate(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->hdcp_context.hdcp_initialized) - return 0; + if (!psp->dtm_context.context.initialized) { + if 
(psp->dtm_context.context.mem_context.shared_buf) + goto out; + else + return 0; + } - ret = psp_hdcp_unload(psp); + ret = psp_dtm_unload(psp); if (ret) return ret; - psp->hdcp_context.hdcp_initialized = 0; + psp->dtm_context.context.initialized = false; - /* free hdcp shared memory */ - amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, - &psp->hdcp_context.hdcp_shared_mc_addr, - &psp->hdcp_context.hdcp_shared_buf); +out: + /* free dtm shared memory */ + psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); return 0; } -// HDCP end +// DTM end -// DTM start -static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t dtm_ta_mc, - uint64_t dtm_mc_shared, - uint32_t dtm_ta_size, - uint32_t shared_size) +// RAP start +static int psp_rap_init_shared_buf(struct psp_context *psp) +{ + return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); +} + +static int psp_rap_load(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc); - cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; + return psp_ta_load(psp, &psp->rap_context.context); +} - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +static int psp_rap_unload(struct psp_context *psp) +{ + return psp_ta_unload(psp, &psp->rap_context.context); } -static int psp_dtm_init_shared_buf(struct psp_context *psp) +static int psp_rap_initialize(struct psp_context *psp) { int ret; + enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for dtm ta <-> Driver + * TODO: bypass the initialize in sriov for now */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->dtm_context.dtm_shared_bo, - &psp->dtm_context.dtm_shared_mc_addr, - &psp->dtm_context.dtm_shared_buf); + if (amdgpu_sriov_vf(psp->adev)) + return 0; - return ret; + if (!psp->rap_context.context.bin_desc.size_bytes || + !psp->rap_context.context.bin_desc.start_addr) { + dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); + return 0; + } + + psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; + psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + + if (!psp->rap_context.context.initialized) { + ret = psp_rap_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_rap_load(psp); + if (!ret) { + psp->rap_context.context.initialized = true; + mutex_init(&psp->rap_context.mutex); + } else + return ret; + + ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); + if (ret || status != TA_RAP_STATUS__SUCCESS) { + psp_rap_terminate(psp); + + dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", + ret, status); + + return ret; + } + + return 0; } -static int psp_dtm_load(struct psp_context *psp) +static int psp_rap_terminate(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) + if (!psp->rap_context.context.initialized) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + ret = psp_rap_unload(psp); - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, 
psp->ta_dtm_ucode_size); + psp->rap_context.context.initialized = false; - psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->dtm_context.dtm_shared_mc_addr, - psp->ta_dtm_ucode_size, - PSP_DTM_SHARED_MEM_SIZE); + /* free rap shared memory */ + psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + return ret; +} - if (!ret) { - psp->dtm_context.dtm_initialized = 1; - psp->dtm_context.session_id = cmd->resp.session_id; - } +int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) +{ + struct ta_rap_shared_memory *rap_cmd; + int ret = 0; - kfree(cmd); + if (!psp->rap_context.context.initialized) + return 0; + + if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && + ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) + return -EINVAL; + + mutex_lock(&psp->rap_context.mutex); + + rap_cmd = (struct ta_rap_shared_memory *) + psp->rap_context.context.mem_context.shared_buf; + memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); + + rap_cmd->cmd_id = ta_cmd_id; + rap_cmd->validation_method_id = METHOD_A; + + ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); + if (ret) + goto out_unlock; + + if (status) + *status = rap_cmd->rap_status; + +out_unlock: + mutex_unlock(&psp->rap_context.mutex); return ret; } +// RAP end -static int psp_dtm_initialize(struct psp_context *psp) +/* securedisplay start */ +static int psp_securedisplay_init_shared_buf(struct psp_context *psp) +{ + return psp_ta_init_shared_buf( + psp, &psp->securedisplay_context.context.mem_context); +} + +static int psp_securedisplay_load(struct psp_context *psp) +{ + return psp_ta_load(psp, &psp->securedisplay_context.context); +} + +static int psp_securedisplay_unload(struct psp_context *psp) +{ + return psp_ta_unload(psp, &psp->securedisplay_context.context); +} + +static int psp_securedisplay_initialize(struct psp_context *psp) { int ret; + struct securedisplay_cmd *securedisplay_cmd; /* * TODO: bypass the initialize in sriov for now @@ -1083,95 +1853,101 @@ static int psp_dtm_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->adev->psp.ta_dtm_ucode_size || - !psp->adev->psp.ta_dtm_start_addr) { - dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); + if (!psp->securedisplay_context.context.bin_desc.size_bytes || + !psp->securedisplay_context.context.bin_desc.start_addr) { + dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); return 0; } - if (!psp->dtm_context.dtm_initialized) { - ret = psp_dtm_init_shared_buf(psp); + psp->securedisplay_context.context.mem_context.shared_mem_size = + PSP_SECUREDISPLAY_SHARED_MEM_SIZE; + psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + + if (!psp->securedisplay_context.context.initialized) { + ret = psp_securedisplay_init_shared_buf(psp); if (ret) return ret; } - ret = psp_dtm_load(psp); - if (ret) + ret = psp_securedisplay_load(psp); + if (!ret) { + psp->securedisplay_context.context.initialized = true; + mutex_init(&psp->securedisplay_context.mutex); + } else return ret; - return 0; -} + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__QUERY_TA); -static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint32_t ta_cmd_id, - uint32_t dtm_session_id) -{ - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; - cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; - /* Note: 
cmd_invoke_cmd.buf is not used for now */ + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); + if (ret) { + psp_securedisplay_terminate(psp); + dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); + return -EINVAL; + } + + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", + securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); + } + + return 0; } -int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +static int psp_securedisplay_terminate(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; /* - * TODO: bypass the loading in sriov for now + * TODO:bypass the terminate in sriov for now */ if (amdgpu_sriov_vf(psp->adev)) return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + if (!psp->securedisplay_context.context.initialized) + return 0; - psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, - psp->dtm_context.session_id); + ret = psp_securedisplay_unload(psp); + if (ret) + return ret; - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + psp->securedisplay_context.context.initialized = false; - kfree(cmd); + /* free securedisplay shared memory */ + psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); return ret; } -static int psp_dtm_terminate(struct psp_context *psp) +int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { int ret; - /* - * TODO: bypass the terminate in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; + if (!psp->securedisplay_context.context.initialized) + return -EINVAL; - if (!psp->dtm_context.dtm_initialized) - return 0; + if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && + ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) + return -EINVAL; - ret = psp_hdcp_unload(psp); - if (ret) - return ret; + mutex_lock(&psp->securedisplay_context.mutex); - psp->dtm_context.dtm_initialized = 0; + ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); - /* free hdcp shared memory */ - amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, - &psp->dtm_context.dtm_shared_mc_addr, - &psp->dtm_context.dtm_shared_buf); + mutex_unlock(&psp->securedisplay_context.mutex); - return 0; + return ret; } -// DTM end +/* SECUREDISPLAY end */ static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; int ret; - if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) { - if (psp->kdb_bin_size && + if (!amdgpu_sriov_vf(adev)) { + if ((is_psp_fw_valid(psp->kdb)) && (psp->funcs->bootloader_load_kdb != NULL)) { ret = psp_bootloader_load_kdb(psp); if (ret) { @@ -1180,16 +1956,58 @@ static int psp_hw_start(struct psp_context *psp) } } - ret = psp_bootloader_load_sysdrv(psp); - if (ret) { - DRM_ERROR("PSP load sysdrv failed!\n"); - return ret; + if ((is_psp_fw_valid(psp->spl)) && + (psp->funcs->bootloader_load_spl != NULL)) { + ret = psp_bootloader_load_spl(psp); + if (ret) { + DRM_ERROR("PSP load spl failed!\n"); + return ret; + } } - ret = psp_bootloader_load_sos(psp); - if (ret) { - DRM_ERROR("PSP load sos failed!\n"); - return ret; + if ((is_psp_fw_valid(psp->sys)) && + (psp->funcs->bootloader_load_sysdrv != NULL)) { + ret = psp_bootloader_load_sysdrv(psp); + if (ret) { + DRM_ERROR("PSP load sys drv failed!\n"); + return ret; + } + } + + if ((is_psp_fw_valid(psp->soc_drv)) && + 
(psp->funcs->bootloader_load_soc_drv != NULL)) { + ret = psp_bootloader_load_soc_drv(psp); + if (ret) { + DRM_ERROR("PSP load soc drv failed!\n"); + return ret; + } + } + + if ((is_psp_fw_valid(psp->intf_drv)) && + (psp->funcs->bootloader_load_intf_drv != NULL)) { + ret = psp_bootloader_load_intf_drv(psp); + if (ret) { + DRM_ERROR("PSP load intf drv failed!\n"); + return ret; + } + } + + if ((is_psp_fw_valid(psp->dbg_drv)) && + (psp->funcs->bootloader_load_dbg_drv != NULL)) { + ret = psp_bootloader_load_dbg_drv(psp); + if (ret) { + DRM_ERROR("PSP load dbg drv failed!\n"); + return ret; + } + } + + if ((is_psp_fw_valid(psp->sos)) && + (psp->funcs->bootloader_load_sos != NULL)) { + ret = psp_bootloader_load_sos(psp); + if (ret) { + DRM_ERROR("PSP load sos failed!\n"); + return ret; + } } } @@ -1205,51 +2023,23 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_tmr_load(psp); - if (ret) { - DRM_ERROR("PSP load tmr failed!\n"); - return ret; - } - - ret = psp_asd_init(psp); - if (ret) { - DRM_ERROR("PSP asd init failed!\n"); - return ret; + /* + * For ASICs with DF Cstate management centralized + * to PMFW, TMR setup should be performed after PMFW + * loaded and before other non-psp firmware loaded. + */ + if (psp->pmfw_centralized_cstate_management) { + ret = psp_load_smu_fw(psp); + if (ret) + return ret; } - ret = psp_asd_load(psp); + ret = psp_tmr_load(psp); if (ret) { - DRM_ERROR("PSP load asd failed!\n"); + DRM_ERROR("PSP load tmr failed!\n"); return ret; } - if (adev->gmc.xgmi.num_physical_nodes > 1) { - ret = psp_xgmi_initialize(psp); - /* Warning the XGMI seesion initialize failure - * Instead of stop driver initialization - */ - if (ret) - dev_err(psp->adev->dev, - "XGMI: Failed to initialize XGMI session\n"); - } - - if (psp->adev->psp.ta_fw) { - ret = psp_ras_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "RAS: Failed to initialize RAS\n"); - - ret = psp_hdcp_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "HDCP: Failed to initialize HDCP\n"); - - ret = psp_dtm_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "DTM: Failed to initialize DTM\n"); - } - return 0; } @@ -1281,6 +2071,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_SDMA7: *type = GFX_FW_TYPE_SDMA7; break; + case AMDGPU_UCODE_ID_CP_MES: + *type = GFX_FW_TYPE_CP_MES; + break; + case AMDGPU_UCODE_ID_CP_MES_DATA: + *type = GFX_FW_TYPE_MES_STACK; + break; case AMDGPU_UCODE_ID_CP_CE: *type = GFX_FW_TYPE_CP_CE; break; @@ -1314,6 +2110,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; break; + case AMDGPU_UCODE_ID_RLC_IRAM: + *type = GFX_FW_TYPE_RLC_IRAM; + break; + case AMDGPU_UCODE_ID_RLC_DRAM: + *type = GFX_FW_TYPE_RLC_DRAM_BOOT; + break; case AMDGPU_UCODE_ID_SMC: *type = GFX_FW_TYPE_SMU; break; @@ -1329,6 +2131,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_VCN: *type = GFX_FW_TYPE_VCN; break; + case AMDGPU_UCODE_ID_VCN1: + *type = GFX_FW_TYPE_VCN1; + break; case AMDGPU_UCODE_ID_DMCU_ERAM: *type = GFX_FW_TYPE_DMCU_ERAM; break; @@ -1341,6 +2146,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_VCN1_RAM: *type = GFX_FW_TYPE_VCN1_RAM; break; + case AMDGPU_UCODE_ID_DMCUB: + *type = GFX_FW_TYPE_DMUB; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; @@ -1403,8 +2211,6 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info 
*ucode, int ret; uint64_t fw_mem_mc_addr = ucode->mc_addr; - memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); - cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); @@ -1417,89 +2223,159 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, return ret; } -static int psp_execute_np_fw_load(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) +static int psp_execute_non_psp_fw_load(struct psp_context *psp, + struct amdgpu_firmware_info *ucode) { int ret = 0; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd); + if (!ret) { + ret = psp_cmd_submit_buf(psp, ucode, cmd, + psp->fence_buf_mc_addr); + } + + release_psp_cmd_buf(psp); + + return ret; +} + +static int psp_load_smu_fw(struct psp_context *psp) +{ + int ret; + struct amdgpu_device *adev = psp->adev; + struct amdgpu_firmware_info *ucode = + &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; + struct amdgpu_ras *ras = psp->ras_context.ras; + + if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) + return 0; + + if ((amdgpu_in_reset(adev) && + ras && adev->ras_enabled && + (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { + ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); + if (ret) { + DRM_WARN("Failed to set MP1 state prepare for reload\n"); + } + } + + ret = psp_execute_non_psp_fw_load(psp, ucode); - ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); if (ret) - return ret; + DRM_ERROR("PSP load smu failed!\n"); - ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, - psp->fence_buf_mc_addr); + return ret; +} +static bool fw_load_skip_check(struct psp_context *psp, + struct amdgpu_firmware_info *ucode) +{ + if (!ucode->fw) + return true; + + if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && + (psp_smu_reload_quirk(psp) || + psp->autoload_supported || + psp->pmfw_centralized_cstate_management)) + return true; + + if (amdgpu_sriov_vf(psp->adev) && + (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) + /*skip ucode loading in SRIOV VF */ + return true; + + if (psp->autoload_supported && + (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || + ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) + /* skip mec JT when autoload is enabled */ + return true; + + return false; +} + +int psp_load_fw_list(struct psp_context *psp, + struct amdgpu_firmware_info **ucode_list, int ucode_count) +{ + int ret = 0, i; + struct amdgpu_firmware_info *ucode; + + for (i = 0; i < ucode_count; ++i) { + ucode = ucode_list[i]; + psp_print_fw_hdr(psp, ucode); + ret = psp_execute_non_psp_fw_load(psp, ucode); + if (ret) + return ret; + } return ret; } -static int psp_np_fw_load(struct psp_context *psp) +static int psp_load_non_psp_fw(struct psp_context *psp) { int i, ret; struct amdgpu_firmware_info *ucode; - struct 
amdgpu_device* adev = psp->adev;
-
-	if (psp->autoload_supported) {
-		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-		if (!ucode->fw)
-			goto out;
+	struct amdgpu_device *adev = psp->adev;
 
-		ret = psp_execute_np_fw_load(psp, ucode);
+	if (psp->autoload_supported &&
+	    !psp->pmfw_centralized_cstate_management) {
+		ret = psp_load_smu_fw(psp);
 		if (ret)
 			return ret;
 	}
 
-out:
 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
 		ucode = &adev->firmware.ucode[i];
-		if (!ucode->fw)
-			continue;
 
 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
-		    (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+		    !fw_load_skip_check(psp, ucode)) {
+			ret = psp_load_smu_fw(psp);
+			if (ret)
+				return ret;
 			continue;
+		}
 
-		if (amdgpu_sriov_vf(adev) &&
-		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
-		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
-			/*skip ucode loading in SRIOV VF */
+		if (fw_load_skip_check(psp, ucode))
 			continue;
 
 		if (psp->autoload_supported &&
-		    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
-		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
-			/* skip mec JT when autoload is enabled */
+		    (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
+		     adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
+		     adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
+		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
+		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
+		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
+			/* PSP only receives one SDMA fw for sienna_cichlid,
+			 * as all four sdma fw are the same
+			 */
			continue;
 
 		psp_print_fw_hdr(psp, ucode);
 
-		ret = psp_execute_np_fw_load(psp, ucode);
+		ret = psp_execute_non_psp_fw_load(psp, ucode);
 		if (ret)
 			return ret;
 
 		/* Start rlc autoload after psp received all the gfx firmware */
 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
 		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
-			ret = psp_rlc_autoload(psp);
+			ret = psp_rlc_autoload_start(psp);
 			if (ret) {
 				DRM_ERROR("Failed to start rlc autoload\n");
 				return ret;
 			}
 		}
-#if 0
-		/* check if firmware loaded sucessfully */
-		if (!amdgpu_psp_check_fw_loading_status(adev, i))
-			return -EINVAL;
-#endif
 	}
 
 	return 0;
@@ -1510,26 +2386,28 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	int ret;
 	struct psp_context *psp = &adev->psp;
 
-	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
 		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
 		goto skip_memalloc;
 	}
 
-	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!psp->cmd)
-		return -ENOMEM;
-
-	/* this fw pri bo is not used under SRIOV */
-	if (!amdgpu_sriov_vf(psp->adev)) {
+	if (amdgpu_sriov_vf(adev)) {
 		ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-					      AMDGPU_GEM_DOMAIN_GTT,
-					      &psp->fw_pri_bo,
-					      &psp->fw_pri_mc_addr,
-					      &psp->fw_pri_buf);
-		if (ret)
-			goto failed;
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &psp->fw_pri_bo,
+					      &psp->fw_pri_mc_addr,
+					      &psp->fw_pri_buf);
+	} else {
+		ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+					      AMDGPU_GEM_DOMAIN_GTT,
+					      &psp->fw_pri_bo,
+					      &psp->fw_pri_mc_addr,
+					      &psp->fw_pri_buf);
 	}
+	if (ret)
+		goto failed;
+
 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &psp->fence_buf_bo,
@@ -1558,10 +2436,49 @@ skip_memalloc:
 	if (ret)
 		goto failed;
 
-	ret = psp_np_fw_load(psp);
+	ret = psp_load_non_psp_fw(psp);
 	if (ret)
 		goto failed;
 
+	ret = psp_asd_initialize(psp);
+	if (ret) {
+		DRM_ERROR("PSP load asd failed!\n");
+		return ret;
+	}
+
+	ret = psp_rl_load(adev);
+	if (ret) {
+		DRM_ERROR("PSP load RL failed!\n");
+		return ret;
+	}
+
+	if (psp->ta_fw) {
+		ret = psp_ras_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"RAS: Failed to initialize RAS\n");
+
+		ret = psp_hdcp_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"HDCP: Failed to initialize HDCP\n");
+
+		ret = psp_dtm_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"DTM: Failed to initialize DTM\n");
+
+		ret = psp_rap_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
+	}
+
 	return 0;
 
 failed:
@@ -1606,35 +2523,27 @@ static int psp_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct psp_context *psp = &adev->psp;
-	void *tmr_buf;
-	void **pptr;
-
-	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
-	    psp->xgmi_context.initialized == 1)
-		psp_xgmi_terminate(psp);
 
-	if (psp->adev->psp.ta_fw) {
+	if (psp->ta_fw) {
 		psp_ras_terminate(psp);
+		psp_securedisplay_terminate(psp);
+		psp_rap_terminate(psp);
 		psp_dtm_terminate(psp);
 		psp_hdcp_terminate(psp);
 	}
 
+	psp_asd_terminate(psp);
+
+	psp_tmr_terminate(psp);
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
-	amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
-			      &psp->asd_shared_buf);
 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
 			      (void **)&psp->cmd_buf_mem);
 
-	kfree(psp->cmd);
-	psp->cmd = NULL;
-
 	return 0;
 }
 
@@ -1645,7 +2554,7 @@ static int psp_suspend(void *handle)
 	struct psp_context *psp = &adev->psp;
 
 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
-	    psp->xgmi_context.initialized == 1) {
+	    psp->xgmi_context.context.initialized) {
 		ret = psp_xgmi_terminate(psp);
 		if (ret) {
 			DRM_ERROR("Failed to terminate xgmi ta\n");
@@ -1653,7 +2562,7 @@
 		}
 	}
 
-	if (psp->adev->psp.ta_fw) {
+	if (psp->ta_fw) {
 		ret = psp_ras_terminate(psp);
 		if (ret) {
 			DRM_ERROR("Failed to terminate ras ta\n");
@@ -1669,6 +2578,28 @@
 			DRM_ERROR("Failed to terminate dtm ta\n");
 			return ret;
 		}
+		ret = psp_rap_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate rap ta\n");
+			return ret;
+		}
+		ret = psp_securedisplay_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate securedisplay ta\n");
+			return ret;
+		}
+	}
+
+	ret = psp_asd_terminate(psp);
+	if (ret) {
+		DRM_ERROR("Failed to terminate asd\n");
+		return ret;
+	}
+
+	ret = psp_tmr_terminate(psp);
+	if (ret) {
+		DRM_ERROR("Failed to terminate tmr\n");
+		return ret;
 	}
 
 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1688,10 +2619,12 @@ static int psp_resume(void *handle)
 
 	DRM_INFO("PSP is resuming...\n");
 
-	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
-	if (ret) {
-		DRM_ERROR("Failed to process memory training!\n");
-		return ret;
+	if (psp->mem_train_ctx.enable_mem_training) {
+		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+		if (ret) {
+			DRM_ERROR("Failed to process memory training!\n");
+			return ret;
+		}
 	}
 
 	mutex_lock(&adev->firmware.mutex);
@@ -1700,10 +2633,59 @@
 	if (ret)
 		goto failed;
 
-	ret = psp_np_fw_load(psp);
+	ret = psp_load_non_psp_fw(psp);
 	if (ret)
 		goto failed;
 
+	ret = psp_asd_initialize(psp);
+	if (ret) {
+		DRM_ERROR("PSP load asd failed!\n");
+		goto failed;
+	}
+
+	ret = psp_rl_load(adev);
+	if (ret) {
+		dev_err(adev->dev, "PSP load RL failed!\n");
+		goto failed;
+	}
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		ret = psp_xgmi_initialize(psp, false, true);
+		/* Warn on XGMI session initialization failure
+		 * instead of stopping driver initialization
+		 */
+		if (ret)
+			dev_err(psp->adev->dev,
+				"XGMI: Failed to initialize XGMI session\n");
+	}
+
+	if (psp->ta_fw) {
+		ret = psp_ras_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"RAS: Failed to initialize RAS\n");
+
+		ret = psp_hdcp_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"HDCP: Failed to initialize HDCP\n");
+
+		ret = psp_dtm_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"DTM: Failed to initialize DTM\n");
+
+		ret = psp_rap_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
+	}
+
 	mutex_unlock(&adev->firmware.mutex);
 
 	return 0;
 
failed:
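
psp_ring_cmd_submit(), introduced further below, keeps the KM ring write pointer in dwords, maps it to a frame slot, and wraps it at the ring boundary. A self-contained illustration of that arithmetic, with a hypothetical 4 KiB ring of 64-byte frames (not the driver's actual sizes):

#include <stdint.h>

#define RING_SIZE_BYTES  0x1000 /* hypothetical ring size */
#define FRAME_SIZE_BYTES 64     /* hypothetical rb frame size */

static uint32_t next_frame_index(uint32_t wptr_dw)
{
	uint32_t ring_size_dw  = RING_SIZE_BYTES / 4;  /* 1024 dwords */
	uint32_t frame_size_dw = FRAME_SIZE_BYTES / 4; /*   16 dwords */

	/* a wptr that is a ring-size multiple has wrapped back to slot 0 */
	if ((wptr_dw % ring_size_dw) == 0)
		return 0;

	return wptr_dw / frame_size_dw; /* slot of the frame being written */
}

static uint32_t advance_wptr(uint32_t wptr_dw)
{
	uint32_t ring_size_dw  = RING_SIZE_BYTES / 4;
	uint32_t frame_size_dw = FRAME_SIZE_BYTES / 4;

	/* advance by one frame, in dwords, wrapping at the ring boundary */
	return (wptr_dw + frame_size_dw) % ring_size_dw;
}

With these sizes the ring holds 64 frames: a wptr of 1008 dwords maps to slot 63, and the next advance wraps it back to 0.

@@ -1731,17 +2713,15 @@ int psp_gpu_reset(struct amdgpu_device *adev)
 int psp_rlc_autoload_start(struct psp_context *psp)
 {
 	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	cmd = 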
kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - kfree(cmd); + + release_psp_cmd_buf(psp); + return ret; } @@ -1755,22 +2735,470 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, ucode.mc_addr = cmd_gpu_addr; ucode.ucode_size = cmd_size; - return psp_execute_np_fw_load(&adev->psp, &ucode); + return psp_execute_non_psp_fw_load(&adev->psp, &ucode); } -static bool psp_check_fw_loading_status(struct amdgpu_device *adev, - enum AMDGPU_UCODE_ID ucode_type) +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index) { - struct amdgpu_firmware_info *ucode = NULL; + unsigned int psp_write_ptr_reg = 0; + struct psp_gfx_rb_frame *write_frame; + struct psp_ring *ring = &psp->km_ring; + struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; + struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + + ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; + struct amdgpu_device *adev = psp->adev; + uint32_t ring_size_dw = ring->ring_size / 4; + uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - if (!adev->firmware.fw_size) - return false; + /* KM (GPCOM) prepare write pointer */ + psp_write_ptr_reg = psp_ring_get_wptr(psp); - ucode = &adev->firmware.ucode[ucode_type]; - if (!ucode->fw || !ucode->ucode_size) - return false; + /* Update KM RB frame pointer to new frame */ + /* write_frame ptr increments by size of rb_frame in bytes */ + /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ + if ((psp_write_ptr_reg % ring_size_dw) == 0) + write_frame = ring_buffer_start; + else + write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); + /* Check invalid write_frame ptr address */ + if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { + DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", + ring_buffer_start, ring_buffer_end, write_frame); + DRM_ERROR("write_frame is pointing to address out of bounds\n"); + return -EINVAL; + } + + /* Initialize KM RB frame */ + memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); + + /* Update KM RB frame */ + write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); + write_frame->fence_value = index; + amdgpu_device_flush_hdp(adev, NULL); + + /* Update the write Pointer in DWORDs */ + psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; + psp_ring_set_wptr(psp, psp_write_ptr_reg); + return 0; +} + +int psp_init_asd_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + const struct psp_firmware_header_v1_0 *asd_hdr; + int err = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for asd microcode\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out; + + asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_context.bin_desc.fw_version = 
le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); + adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + return 0; +out: + dev_err(adev->dev, "fail to initialize asd microcode\n"); + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; + return err; +} + +int psp_init_toc_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + const struct psp_firmware_header_v1_0 *toc_hdr; + int err = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for toc microcode\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); + err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.toc_fw); + if (err) + goto out; + + toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; + adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); + adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); + adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); + adev->psp.toc.start_addr = (uint8_t *)toc_hdr + + le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); + return 0; +out: + dev_err(adev->dev, "fail to request/validate toc microcode\n"); + release_firmware(adev->psp.toc_fw); + adev->psp.toc_fw = NULL; + return err; +} + +static int parse_sos_bin_descriptor(struct psp_context *psp, + const struct psp_fw_bin_desc *desc, + const struct psp_firmware_header_v2_0 *sos_hdr) +{ + uint8_t *ucode_start_addr = NULL; + + if (!psp || !desc || !sos_hdr) + return -EINVAL; + + ucode_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(desc->offset_bytes) + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); + + switch (desc->fw_type) { + case PSP_FW_TYPE_PSP_SOS: + psp->sos.fw_version = le32_to_cpu(desc->fw_version); + psp->sos.feature_version = le32_to_cpu(desc->fw_version); + psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); + psp->sos.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_SYS_DRV: + psp->sys.fw_version = le32_to_cpu(desc->fw_version); + psp->sys.feature_version = le32_to_cpu(desc->fw_version); + psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); + psp->sys.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_KDB: + psp->kdb.fw_version = le32_to_cpu(desc->fw_version); + psp->kdb.feature_version = le32_to_cpu(desc->fw_version); + psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); + psp->kdb.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_TOC: + psp->toc.fw_version = le32_to_cpu(desc->fw_version); + psp->toc.feature_version = le32_to_cpu(desc->fw_version); + psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->toc.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_SPL: + psp->spl.fw_version = le32_to_cpu(desc->fw_version); + psp->spl.feature_version = le32_to_cpu(desc->fw_version); + psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); + psp->spl.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_RL: + psp->rl.fw_version = le32_to_cpu(desc->fw_version); + psp->rl.feature_version = le32_to_cpu(desc->fw_version); + psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); + psp->rl.start_addr = ucode_start_addr; + break; + 
case PSP_FW_TYPE_PSP_SOC_DRV: + psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->soc_drv.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_INTF_DRV: + psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->intf_drv.start_addr = ucode_start_addr; + break; + case PSP_FW_TYPE_PSP_DBG_DRV: + psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->dbg_drv.start_addr = ucode_start_addr; + break; + default: + dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); + break; + } + + return 0; +} + +static int psp_init_sos_base_fw(struct amdgpu_device *adev) +{ + const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; + uint8_t *ucode_array_start_addr; - return psp_compare_sram_data(&adev->psp, ucode, ucode_type); + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + ucode_array_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); + + if (adev->gmc.xgmi.connected_to_cpu || + (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) { + adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); + adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); + + adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); + adev->psp.sys.start_addr = ucode_array_start_addr; + + adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); + adev->psp.sos.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr->sos.offset_bytes); + adev->psp.xgmi_context.supports_extended_data = false; + } else { + /* Load alternate PSP SOS FW */ + sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; + + adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); + adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); + + adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); + adev->psp.sys.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); + + adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); + adev->psp.sos.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); + adev->psp.xgmi_context.supports_extended_data = true; + } + + if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { + dev_warn(adev->dev, "PSP SOS FW not available"); + return -EINVAL; + } + + return 0; +} + +int psp_init_sos_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; + const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; + const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; + const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; + int err = 0; + uint8_t *ucode_array_start_addr; + int fw_index = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for sos microcode\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); + err = 
request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.sos_fw); + if (err) + goto out; + + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + ucode_array_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); + amdgpu_ucode_print_psp_hdr(&sos_hdr->header); + + switch (sos_hdr->header.header_version_major) { + case 1: + err = psp_init_sos_base_fw(adev); + if (err) + goto out; + + if (sos_hdr->header.header_version_minor == 1) { + sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; + adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); + adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + + le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); + adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); + adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + + le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); + } + if (sos_hdr->header.header_version_minor == 2) { + sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; + adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); + adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + + le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); + } + if (sos_hdr->header.header_version_minor == 3) { + sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; + adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); + adev->psp.toc.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); + adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); + adev->psp.kdb.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); + adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); + adev->psp.spl.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); + adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); + adev->psp.rl.start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); + } + break; + case 2: + sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; + + if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { + dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); + err = -EINVAL; + goto out; + } + + for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { + err = parse_sos_bin_descriptor(psp, + &sos_hdr_v2_0->psp_fw_bin[fw_index], + sos_hdr_v2_0); + if (err) + goto out; + } + break; + default: + dev_err(adev->dev, + "unsupported psp sos firmware\n"); + err = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(adev->dev, + "failed to init sos firmware\n"); + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; + + return err; +} + +static int parse_ta_bin_descriptor(struct psp_context *psp, + const struct psp_fw_bin_desc *desc, + const struct ta_firmware_header_v2_0 *ta_hdr) +{ + uint8_t *ucode_start_addr = NULL; + + if (!psp || !desc || !ta_hdr) + return -EINVAL; + + ucode_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(desc->offset_bytes) + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + switch (desc->fw_type) { + case TA_FW_TYPE_PSP_ASD: + psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); + 
psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->asd_context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_XGMI: + psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_RAS: + psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_HDCP: + psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_DTM: + psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_RAP: + psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); + psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); + psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; + break; + case TA_FW_TYPE_PSP_SECUREDISPLAY: + psp->securedisplay_context.context.bin_desc.fw_version = + le32_to_cpu(desc->fw_version); + psp->securedisplay_context.context.bin_desc.size_bytes = + le32_to_cpu(desc->size_bytes); + psp->securedisplay_context.context.bin_desc.start_addr = + ucode_start_addr; + break; + default: + dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); + break; + } + + return 0; +} + +int psp_init_ta_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + const struct ta_firmware_header_v2_0 *ta_hdr; + int err = 0; + int ta_index = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for ta microcode\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out; + + ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; + + if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) { + dev_err(adev->dev, "unsupported TA header version\n"); + err = -EINVAL; + goto out; + } + + if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { + dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); + err = -EINVAL; + goto out; + } + + for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { + err = parse_ta_bin_descriptor(psp, + &ta_hdr->ta_fw_bin[ta_index], + ta_hdr); + if (err) + goto out; + } + + return 0; +out: + dev_err(adev->dev, "fail to initialize ta microcode\n"); + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + return err; } static int psp_set_clockgating_state(void *handle, @@ -1785,6 +3213,110 @@ static int psp_set_powergating_state(void *handle, return 0; } +static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = 
drm_to_adev(ddev); + uint32_t fw_ver; + int ret; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + mutex_lock(&adev->psp.mutex); + ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); + mutex_unlock(&adev->psp.mutex); + + if (ret) { + DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); + return ret; + } + + return sysfs_emit(buf, "%x\n", fw_ver); +} + +static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + int ret, idx; + char fw_name[100]; + const struct firmware *usbc_pd_fw; + struct amdgpu_bo *fw_buf_bo = NULL; + uint64_t fw_pri_mc_addr; + void *fw_pri_cpu_addr; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + if (!drm_dev_enter(ddev, &idx)) + return -ENODEV; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); + ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); + if (ret) + goto fail; + + /* LFB address which is aligned to 1MB boundary per PSP request */ + ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, + AMDGPU_GEM_DOMAIN_VRAM, + &fw_buf_bo, + &fw_pri_mc_addr, + &fw_pri_cpu_addr); + if (ret) + goto rel_buf; + + memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); + + mutex_lock(&adev->psp.mutex); + ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); + mutex_unlock(&adev->psp.mutex); + + amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); + +rel_buf: + release_firmware(usbc_pd_fw); +fail: + if (ret) { + DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); + count = ret; + } + + drm_dev_exit(idx); + return count; +} + +void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) +{ + int idx; + + if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) + return; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, start_addr, bin_size); + + drm_dev_exit(idx); +} + +static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, + psp_usbc_pd_fw_sysfs_read, + psp_usbc_pd_fw_sysfs_write); + +int is_psp_fw_valid(struct psp_bin_desc bin) +{ + return bin.size_bytes; +} + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -1803,14 +3335,19 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; -static const struct amdgpu_psp_funcs psp_funcs = { - .check_fw_loading_status = psp_check_fw_loading_status, -}; +static int psp_sysfs_init(struct amdgpu_device *adev) +{ + int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); -static void psp_set_funcs(struct amdgpu_device *adev) + if (ret) + DRM_ERROR("Failed to create USBC PD FW control file!"); + + return ret; +} + +static void psp_sysfs_fini(struct amdgpu_device *adev) { - if (NULL == adev->firmware.funcs) - adev->firmware.funcs = &psp_funcs; + device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); } const struct amdgpu_ip_block_version psp_v3_1_ip_block = @@ -1840,6 +3377,14 @@ const struct amdgpu_ip_block_version psp_v11_0_ip_block = .funcs = &psp_ip_funcs, }; +const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { + .type = AMD_IP_BLOCK_TYPE_PSP, + .major = 11, + .minor = 0, + .rev = 8, + .funcs = &psp_ip_funcs, +}; + const struct amdgpu_ip_block_version psp_v12_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_PSP, @@ 
-1848,3 +3393,11 @@ const struct amdgpu_ip_block_version psp_v12_0_ip_block = .rev = 0, .funcs = &psp_ip_funcs, }; + +const struct amdgpu_ip_block_version psp_v13_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_PSP, + .major = 13, + .minor = 0, + .rev = 0, + .funcs = &psp_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 09c5474ebcc3..f29afabbff1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -29,28 +29,40 @@ #include "psp_gfx_if.h" #include "ta_xgmi_if.h" #include "ta_ras_if.h" +#include "ta_rap_if.h" +#include "ta_secureDisplay_if.h" #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 -#define PSP_ASD_SHARED_MEM_SIZE 0x4000 -#define PSP_XGMI_SHARED_MEM_SIZE 0x4000 -#define PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 -#define PSP_TMR_SIZE 0x400000 -#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 -#define PSP_DTM_SHARED_MEM_SIZE 0x4000 -#define PSP_SHARED_MEM_SIZE 0x4000 +#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000) +#define PSP_FW_NAME_LEN 0x24 + +enum psp_shared_mem_size { + PSP_ASD_SHARED_MEM_SIZE = 0x0, + PSP_XGMI_SHARED_MEM_SIZE = 0x4000, + PSP_RAS_SHARED_MEM_SIZE = 0x4000, + PSP_HDCP_SHARED_MEM_SIZE = 0x4000, + PSP_DTM_SHARED_MEM_SIZE = 0x4000, + PSP_RAP_SHARED_MEM_SIZE = 0x4000, + PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000, +}; struct psp_context; struct psp_xgmi_node_info; struct psp_xgmi_topology_info; +struct psp_bin_desc; enum psp_bootloader_cmd { PSP_BL__LOAD_SYSDRV = 0x10000, PSP_BL__LOAD_SOSDRV = 0x20000, PSP_BL__LOAD_KEY_DATABASE = 0x80000, + PSP_BL__LOAD_SOCDRV = 0xB0000, + PSP_BL__LOAD_INTFDRV = 0xC0000, + PSP_BL__LOAD_DBGDRV = 0xD0000, PSP_BL__DRAM_LONG_TRAIN = 0x100000, PSP_BL__DRAM_SHORT_TRAIN = 0x200000, + PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, }; enum psp_ring_type @@ -71,6 +83,7 @@ struct psp_ring uint64_t ring_mem_mc_addr; void *ring_mem_handle; uint32_t ring_size; + uint32_t ring_wptr; }; /* More registers may will be supported */ @@ -85,7 +98,11 @@ struct psp_funcs { int (*init_microcode)(struct psp_context *psp); int (*bootloader_load_kdb)(struct psp_context *psp); + int (*bootloader_load_spl)(struct psp_context *psp); int (*bootloader_load_sysdrv)(struct psp_context *psp); + int (*bootloader_load_soc_drv)(struct psp_context *psp); + int (*bootloader_load_intf_drv)(struct psp_context *psp); + int (*bootloader_load_dbg_drv)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_create)(struct psp_context *psp, @@ -94,28 +111,13 @@ struct psp_funcs enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); - int (*cmd_submit)(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index); - bool (*compare_sram_data)(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); - int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id); - int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id); - int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, - struct psp_xgmi_topology_info *topology); - int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, - struct psp_xgmi_topology_info *topology); - bool 
(*support_vmr_ring)(struct psp_context *psp); - int (*ras_trigger_error)(struct psp_context *psp, - struct ta_ras_trigger_error_input *info); - int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr); - int (*rlc_autoload_start)(struct psp_context *psp); - int (*mem_training_init)(struct psp_context *psp); - void (*mem_training_fini)(struct psp_context *psp); int (*mem_training)(struct psp_context *psp, uint32_t ops); + uint32_t (*ring_get_wptr)(struct psp_context *psp); + void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); + int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr); + int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -124,6 +126,7 @@ struct psp_xgmi_node_info { uint8_t num_hops; uint8_t is_sharing_enabled; enum ta_xgmi_assigned_sdma_engine sdma_engine; + uint8_t num_links; }; struct psp_xgmi_topology_info { @@ -131,44 +134,49 @@ struct psp_xgmi_topology_info { struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; -struct psp_xgmi_context { - uint8_t initialized; - uint32_t session_id; - struct amdgpu_bo *xgmi_shared_bo; - uint64_t xgmi_shared_mc_addr; - void *xgmi_shared_buf; - struct psp_xgmi_topology_info top_info; +struct psp_bin_desc { + uint32_t fw_version; + uint32_t feature_version; + uint32_t size_bytes; + uint8_t *start_addr; }; -struct psp_ras_context { - /*ras fw*/ - bool ras_initialized; - uint32_t session_id; - struct amdgpu_bo *ras_shared_bo; - uint64_t ras_shared_mc_addr; - void *ras_shared_buf; - struct amdgpu_ras *ras; +struct ta_mem_context { + struct amdgpu_bo *shared_bo; + uint64_t shared_mc_addr; + void *shared_buf; + enum psp_shared_mem_size shared_mem_size; }; -struct psp_hdcp_context { - bool hdcp_initialized; +struct ta_context { + bool initialized; uint32_t session_id; - struct amdgpu_bo *hdcp_shared_bo; - uint64_t hdcp_shared_mc_addr; - void *hdcp_shared_buf; + struct ta_mem_context mem_context; + struct psp_bin_desc bin_desc; + enum psp_gfx_cmd_id ta_load_type; }; -struct psp_dtm_context { - bool dtm_initialized; - uint32_t session_id; - struct amdgpu_bo *dtm_shared_bo; - uint64_t dtm_shared_mc_addr; - void *dtm_shared_buf; +struct ta_cp_context { + struct ta_context context; + struct mutex mutex; +}; + +struct psp_xgmi_context { + struct ta_context context; + struct psp_xgmi_topology_info top_info; + bool supports_extended_data; +}; + +struct psp_ras_context { + struct ta_context context; + struct amdgpu_ras *ras; }; #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 +/*Define the VRAM size that will be encroached by BIST training.*/ +#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 enum psp_memory_training_init_flag { PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, @@ -199,7 +207,6 @@ struct psp_memory_training_context { /*vram offset of the p2c training data*/ u64 p2c_train_data_offset; - struct amdgpu_bo *p2c_bo; /*vram offset of the c2p training data*/ u64 c2p_train_data_offset; @@ -207,6 +214,61 @@ struct psp_memory_training_context { enum psp_memory_training_init_flag init; u32 training_cnt; + bool enable_mem_training; +}; + +/** PSP runtime DB **/ +#define PSP_RUNTIME_DB_SIZE_IN_BYTES 0x10000 +#define PSP_RUNTIME_DB_OFFSET 0x100000 +#define PSP_RUNTIME_DB_COOKIE_ID 0x0ed5 +#define PSP_RUNTIME_DB_VER_1 0x0100 +#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT 0x40 + +enum psp_runtime_entry_type { + PSP_RUNTIME_ENTRY_TYPE_INVALID = 0x0, + 
PSP_RUNTIME_ENTRY_TYPE_TEST = 0x1, + PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON = 0x2, /* Common mGPU runtime data */ + PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL = 0x3, /* WAFL runtime data */ + PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI = 0x4, /* XGMI runtime data */ + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG = 0x5, /* Boot Config runtime data */ +}; + +/* PSP runtime DB header */ +struct psp_runtime_data_header { + /* determine the existence of runtime db */ + uint16_t cookie; + /* version of runtime db */ + uint16_t version; +}; + +/* PSP runtime DB entry */ +struct psp_runtime_entry { + /* type of runtime db entry */ + uint32_t entry_type; + /* offset of entry in bytes */ + uint16_t offset; + /* size of entry in bytes */ + uint16_t size; +}; + +/* PSP runtime DB directory */ +struct psp_runtime_data_directory { + /* number of valid entries */ + uint16_t entry_count; + /* db entries*/ + struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT]; +}; + +/* PSP runtime DB boot config feature bitmask */ +enum psp_runtime_boot_cfg_feature { + BOOT_CFG_FEATURE_GECC = 0x1, + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING = 0x2, +}; + +/* PSP runtime DB boot config entry */ +struct psp_runtime_boot_cfg_entry { + uint32_t boot_cfg_bitmask; + uint32_t reserved; }; struct psp_context @@ -224,30 +286,25 @@ struct psp_context /* sos firmware */ const struct firmware *sos_fw; - uint32_t sos_fw_version; - uint32_t sos_feature_version; - uint32_t sys_bin_size; - uint32_t sos_bin_size; - uint32_t toc_bin_size; - uint32_t kdb_bin_size; - uint8_t *sys_start_addr; - uint8_t *sos_start_addr; - uint8_t *toc_start_addr; - uint8_t *kdb_start_addr; + struct psp_bin_desc sys; + struct psp_bin_desc sos; + struct psp_bin_desc toc; + struct psp_bin_desc kdb; + struct psp_bin_desc spl; + struct psp_bin_desc rl; + struct psp_bin_desc soc_drv; + struct psp_bin_desc intf_drv; + struct psp_bin_desc dbg_drv; /* tmr buffer */ struct amdgpu_bo *tmr_bo; uint64_t tmr_mc_addr; - /* asd firmware and buffer */ - const struct firmware *asd_fw; - uint32_t asd_fw_version; - uint32_t asd_feature_version; - uint32_t asd_ucode_size; - uint8_t *asd_start_addr; - struct amdgpu_bo *asd_shared_bo; - uint64_t asd_shared_mc_addr; - void *asd_shared_buf; + /* asd firmware */ + const struct firmware *asd_fw; + + /* toc firmware */ + const struct firmware *toc_fw; /* fence buffer */ struct amdgpu_bo *fence_buf_bo; @@ -263,31 +320,24 @@ struct psp_context atomic_t fence_value; /* flag to mark whether gfx fw autoload is supported or not */ bool autoload_supported; + /* flag to mark whether df cstate management centralized to PMFW */ + bool pmfw_centralized_cstate_management; /* xgmi ta firmware and buffer */ const struct firmware *ta_fw; uint32_t ta_fw_version; - uint32_t ta_xgmi_ucode_version; - uint32_t ta_xgmi_ucode_size; - uint8_t *ta_xgmi_start_addr; - uint32_t ta_ras_ucode_version; - uint32_t ta_ras_ucode_size; - uint8_t *ta_ras_start_addr; - - uint32_t ta_hdcp_ucode_version; - uint32_t ta_hdcp_ucode_size; - uint8_t *ta_hdcp_start_addr; - - uint32_t ta_dtm_ucode_version; - uint32_t ta_dtm_ucode_size; - uint8_t *ta_dtm_start_addr; + struct ta_context asd_context; struct psp_xgmi_context xgmi_context; - struct psp_ras_context ras; - struct psp_hdcp_context hdcp_context; - struct psp_dtm_context dtm_context; + struct psp_ras_context ras_context; + struct ta_cp_context hdcp_context; + struct ta_cp_context dtm_context; + struct ta_cp_context rap_context; + struct ta_cp_context securedisplay_context; struct mutex mutex; struct psp_memory_training_context 
mem_train_ctx; + + uint32_t boot_cfg_bitmask; }; struct amdgpu_psp_funcs { @@ -300,76 +350,102 @@ struct amdgpu_psp_funcs { #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type))) -#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \ - (psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index)) -#define psp_compare_sram_data(psp, ucode, type) \ - (psp)->funcs->compare_sram_data((psp), (ucode), (type)) #define psp_init_microcode(psp) \ ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0) #define psp_bootloader_load_kdb(psp) \ ((psp)->funcs->bootloader_load_kdb ? (psp)->funcs->bootloader_load_kdb((psp)) : 0) +#define psp_bootloader_load_spl(psp) \ + ((psp)->funcs->bootloader_load_spl ? (psp)->funcs->bootloader_load_spl((psp)) : 0) #define psp_bootloader_load_sysdrv(psp) \ ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0) +#define psp_bootloader_load_soc_drv(psp) \ + ((psp)->funcs->bootloader_load_soc_drv ? (psp)->funcs->bootloader_load_soc_drv((psp)) : 0) +#define psp_bootloader_load_intf_drv(psp) \ + ((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0) +#define psp_bootloader_load_dbg_drv(psp) \ + ((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0) #define psp_bootloader_load_sos(psp) \ ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) -#define psp_support_vmr_ring(psp) \ - ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) -#define psp_xgmi_get_node_id(psp, node_id) \ - ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL) -#define psp_xgmi_get_hive_id(psp, hive_id) \ - ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL) -#define psp_xgmi_get_topology_info(psp, num_device, topology) \ - ((psp)->funcs->xgmi_get_topology_info ? \ - (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL) -#define psp_xgmi_set_topology_info(psp, num_device, topology) \ - ((psp)->funcs->xgmi_set_topology_info ? \ - (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL) -#define psp_rlc_autoload(psp) \ - ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0) -#define psp_mem_training_init(psp) \ - ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0) -#define psp_mem_training_fini(psp) \ - ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0) #define psp_mem_training(psp, ops) \ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0) -#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) +#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) +#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) -#define psp_ras_trigger_error(psp, info) \ - ((psp)->funcs->ras_trigger_error ? 
\ - (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL) -#define psp_ras_cure_posion(psp, addr) \ - ((psp)->funcs->ras_cure_posion ? \ - (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL) +#define psp_load_usbc_pd_fw(psp, fw_pri_mc_addr) \ + ((psp)->funcs->load_usbc_pd_fw ? \ + (psp)->funcs->load_usbc_pd_fw((psp), (fw_pri_mc_addr)) : -EINVAL) + +#define psp_read_usbc_pd_fw(psp, fw_ver) \ + ((psp)->funcs->read_usbc_pd_fw ? \ + (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL) extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; -extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t field_val, uint32_t mask, bool check_changed); - extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; +extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; +extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block; extern const struct amdgpu_ip_block_version psp_v12_0_ip_block; +extern const struct amdgpu_ip_block_version psp_v13_0_ip_block; + +extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, + uint32_t field_val, uint32_t mask, bool check_changed); int psp_gpu_reset(struct amdgpu_device *adev); int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, uint64_t cmd_gpu_addr, int cmd_size); +int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta); +int psp_xgmi_terminate(struct psp_context *psp); int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id); +int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id); +int psp_xgmi_get_topology_info(struct psp_context *psp, + int number_devices, + struct psp_xgmi_topology_info *topology, + bool get_extended_data); +int psp_xgmi_set_topology_info(struct psp_context *psp, + int number_devices, + struct psp_xgmi_topology_info *topology); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); +int psp_ras_trigger_error(struct psp_context *psp, + struct ta_ras_trigger_error_input *info); + int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status); +int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); -extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, uint32_t value); +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index); +int psp_init_asd_microcode(struct psp_context *psp, + const char *chip_name); +int psp_init_toc_microcode(struct psp_context *psp, + const char *chip_name); +int psp_init_sos_microcode(struct psp_context *psp, + const char *chip_name); +int psp_init_ta_microcode(struct psp_context *psp, + const char *chip_name); +int psp_get_fw_attestation_records_addr(struct psp_context *psp, + uint64_t *output_ptr); + +int psp_load_fw_list(struct psp_context *psp, + struct amdgpu_firmware_info **ucode_list, int ucode_count); +void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); + +int is_psp_fw_valid(struct psp_bin_desc bin); #endif
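A usage sketch for the new usbc_pd_fw sysfs attribute added earlier in this patch (assuming card 0; the image name is hypothetical, and echo -n avoids a trailing newline, since the write handler splices the raw buffer into the "amdgpu/%s" firmware path):

	# read back the flashed USB-C PD firmware version (reported in hex)
	cat /sys/class/drm/card0/device/usbc_pd_fw
	# flash an image installed under /lib/firmware/amdgpu/
	echo -n usbc_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c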
b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c new file mode 100644 index 000000000000..12010c988c8b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -0,0 +1,128 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> + +#include "amdgpu.h" +#include "amdgpu_rap.h" + +/** + * DOC: AMDGPU RAP debugfs test interface + * + * How to use? + * echo opcode > <debugfs_dir>/dri/xxx/rap_test + * + * opcode: + * Currently only opcode 2 is supported by the Linux host driver; + * it stands for TA_CMD_RAP__VALIDATE_L0 and is used to trigger + * L0 policy validation. See the header file ta_rap_if.h for + * more detail. + * + */ +static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct ta_rap_shared_memory *rap_shared_mem; + struct ta_rap_cmd_output_data *rap_cmd_output; + struct drm_device *dev = adev_to_drm(adev); + uint32_t op; + enum ta_rap_status status; + int ret; + + if (*pos || size != 2) + return -EINVAL; + + ret = kstrtouint_from_user(buf, size, *pos, &op); + if (ret) + return ret; + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); + return ret; + } + + /* make sure gfx core is on, RAP TA can't handle + * GFX OFF case currently.
+ */ + amdgpu_gfx_off_ctrl(adev, false); + + switch (op) { + case 2: + ret = psp_rap_invoke(&adev->psp, op, &status); + if (!ret && status == TA_RAP_STATUS__SUCCESS) { + dev_info(adev->dev, "RAP L0 validate test success.\n"); + } else { + rap_shared_mem = (struct ta_rap_shared_memory *) + adev->psp.rap_context.context.mem_context.shared_buf; + rap_cmd_output = &(rap_shared_mem->rap_out_message.output); + + dev_info(adev->dev, "RAP test failed, the output is:\n"); + dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n", + rap_cmd_output->last_subsection); + dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n", + rap_cmd_output->num_total_validate); + dev_info(adev->dev, "\tnum_valid: 0x%08x.\n", + rap_cmd_output->num_valid); + dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n", + rap_cmd_output->last_validate_addr); + dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n", + rap_cmd_output->last_validate_val); + dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n", + rap_cmd_output->last_validate_val_exptd); + } + break; + default: + dev_info(adev->dev, "Unsupported op id: %d, ", op); + dev_info(adev->dev, "Only support op 2(L0 validate test).\n"); + break; + } + + amdgpu_gfx_off_ctrl(adev, true); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return size; +} + +static const struct file_operations amdgpu_rap_debugfs_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_rap_debugfs_write, + .llseek = default_llseek +}; + +void amdgpu_rap_debugfs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + + if (!adev->psp.rap_context.context.initialized) + return; + + debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root, + adev, &amdgpu_rap_debugfs_ops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h new file mode 100644 index 000000000000..ec6d7632d3a0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#ifndef _AMDGPU_RAP_H +#define _AMDGPU_RAP_H + +#include "amdgpu.h" + +void amdgpu_rap_debugfs_init(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 404483437bd3..08133de21fdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -27,11 +27,20 @@ #include <linux/uaccess.h> #include <linux/reboot.h> #include <linux/syscalls.h> +#include <linux/pm_runtime.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +#include "atom.h" +#ifdef CONFIG_X86_MCE_AMD +#include <asm/mce.h> + +static bool notifier_registered; +#endif +static const char *RAS_FS_NAME = "ras"; const char *ras_error_string[] = { "none", @@ -56,18 +65,40 @@ const char *ras_block_string[] = { "mp0", "mp1", "fuse", + "mca", +}; + +const char *ras_mca_block_string[] = { + "mca_mp0", + "mca_mp1", + "mca_mpio", + "mca_iohc", }; +const char *get_ras_block_str(struct ras_common_if *ras_block) +{ + if (!ras_block) + return "NULL"; + + if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) + return "OUT OF RANGE"; + + if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) + return ras_mca_block_string[ras_block->sub_block_index]; + + return ras_block_string[ras_block->block]; +} + #define ras_err_str(i) (ras_error_string[ffs(i)]) -#define ras_block_str(i) (ras_block_string[i]) -#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1 -#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) +/* typical ECC bad page rate is 1 bad page per 100MB VRAM */ +#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -76,8 +107,75 @@ enum amdgpu_ras_retire_page_reservation { atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); +static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, + uint64_t addr); static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr); +#ifdef CONFIG_X86_MCE_AMD +static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); +struct mce_notifier_adev_list { + struct amdgpu_device *devs[MAX_GPU_INSTANCE]; + int num_gpu; +}; +static struct mce_notifier_adev_list mce_adev_list; +#endif + +void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) +{ + if (adev && amdgpu_ras_get_context(adev)) + amdgpu_ras_get_context(adev)->error_query_ready = ready; +} + +static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) +{ + if (adev && amdgpu_ras_get_context(adev)) + return amdgpu_ras_get_context(adev)->error_query_ready; + + return false; +} + +static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) +{ + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct eeprom_table_record err_rec; + + if ((address >= adev->gmc.mc_vram_size) || + (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + dev_warn(adev->dev, + "RAS WARN: input address 0x%llx is invalid.\n", + address); + return -EINVAL; + } + + if (amdgpu_ras_check_bad_page(adev, address)) { + dev_warn(adev->dev, + "RAS WARN: 0x%llx has already been marked as bad page!\n", + address); + return 0; + } + + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); + + err_rec.address = address; + err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT; 
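+	/* timestamp the record in wall-clock seconds and mark it non-recoverable */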
+ err_rec.ts = (uint64_t)ktime_get_real_seconds(); + err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + + err_data.err_addr = &err_rec; + err_data.err_addr_cnt = 1; + + if (amdgpu_bad_page_threshold != 0) { + amdgpu_ras_add_bad_pages(adev, err_data.err_addr, + err_data.err_addr_cnt); + amdgpu_ras_save_bad_pages(adev); + } + + dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); + dev_warn(adev->dev, "Clear EEPROM:\n"); + dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); + + return 0; +} static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -89,7 +187,7 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, ssize_t s; char val[128]; - if (amdgpu_ras_error_query(obj->adev, &info)) + if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", @@ -123,7 +221,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { *block_id = i; - if (strcmp(name, ras_block_str(i)) == 0) + if (strcmp(name, ras_block_string[i]) == 0) return 0; } return -EINVAL; @@ -158,11 +256,24 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, op = 1; else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) op = 2; + else if (strstr(str, "retire_page") != NULL) + op = 3; else if (str[0] && str[1] && str[2] && str[3]) /* ascii string, but commands are not matched. */ return -EINVAL; if (op != -1) { + if (op == 3) { + if (sscanf(str, "%*s 0x%llx", &address) != 1 && + sscanf(str, "%*s %llu", &address) != 1) + return -EINVAL; + + data->op = op; + data->inject.address = address; + + return 0; + } + if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) return -EINVAL; @@ -178,11 +289,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->op = op; if (op == 2) { - if (sscanf(str, "%*s %*s %*s %u %llu %llu", - &sub_block, &address, &value) != 3) - if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", - &sub_block, &address, &value) != 3) - return -EINVAL; + if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", + &sub_block, &address, &value) != 3 && + sscanf(str, "%*s %*s %*s %u %llu %llu", + &sub_block, &address, &value) != 3) + return -EINVAL; data->head.sub_block_index = sub_block; data->inject.address = address; data->inject.value = value; @@ -198,13 +309,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, - struct ras_common_if *head); - /** * DOC: AMDGPU RAS debugfs control interface * - * It accepts struct ras_debug_if who has two members. + * The control interface accepts struct ras_debug_if which has two members. * * First member: ras_debug_if::head or ras_debug_if::inject. * @@ -229,32 +337,36 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, * * How to use the interface? * - * Programs + * In a program * - * Copy the struct ras_debug_if in your codes and initialize it. - * Write the struct to the control node. + * Copy the struct ras_debug_if in your code and initialize it. + * Write the struct to the control interface. * - * Shells + * From shell * * .. 
code-block:: bash * - * echo op block [error [sub_block address value]] > .../ras/ras_ctrl + * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "inject <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * + * Where <N> is the card you want to affect. * - * Parameters: + * "disable" requires only the block. + * "enable" requires the block and error type. + * "inject" requires the block, error type, address, and value. * - * op: disable, enable, inject - * disable: only block is needed - * enable: block and error are needed - * inject: error, address, value are needed - * block: umc, sdma, gfx, ......... + * The block is one of: umc, sdma, gfx, etc. * see ras_block_string[] for details - * error: ue, ce - * ue: multi_uncorrectable - * ce: single_correctable - * sub_block: - * sub block index, pass 0 if there is no sub block * - * here are some examples for bash commands: + * The error type is one of: ue, ce, where + * ue is multi-uncorrectable and + * ce is single-correctable. + * + * The sub-block is the sub-block index, pass 0 if there is no sub-block. + * The address and value are hexadecimal numbers, leading 0x is optional. + * + * For instance, * * .. code-block:: bash * * @@ -262,30 +374,45 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl * - * How to check the result? + * How to check the result of the operation? * - * For disable/enable, please check ras features at + * To check disable/enable, see "ras" features at: * /sys/class/drm/card[0/1/2...]/device/ras/features * - * For inject, please check corresponding err count at - * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count + * To check inject, see the corresponding error count at: + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count * * .. note:: * Operations are only allowed on blocks which are supported. - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask + * to see which blocks support RAS on a particular asic.
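+ * + * As an end-to-end sketch, assuming card 0 supports RAS on the umc block: + * + * .. code-block:: bash + * + *	echo "enable umc ce" > /sys/kernel/debug/dri/0/ras/ras_ctrl + *	echo "inject umc ce 0 0 0" > /sys/kernel/debug/dri/0/ras/ras_ctrl + *	cat /sys/class/drm/card0/device/ras/umc_err_count + *	echo "disable umc" > /sys/kernel/debug/dri/0/ras/ras_ctrl + * + * The parser also accepts a "retire_page <address>" command, which + * retires the given VRAM page directly via amdgpu_reserve_page_direct().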
* */ -static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) +static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, + const char __user *buf, + size_t size, loff_t *pos) { struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; struct ras_debug_if data; int ret = 0; + if (!amdgpu_ras_get_error_query_ready(adev)) { + dev_warn(adev->dev, "RAS WARN: error injection " + "currently inaccessible\n"); + return size; + } + ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); if (ret) - return -EINVAL; + return ret; + + if (data.op == 3) { + ret = amdgpu_reserve_page_direct(adev, data.inject.address); + if (!ret) + return size; + else + return ret; + } if (!amdgpu_ras_is_supported(adev, data.head.block)) return -EINVAL; @@ -300,6 +427,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * case 2: if ((data.inject.address >= adev->gmc.mc_vram_size) || (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + dev_warn(adev->dev, "RAS WARN: input address " + "0x%llx is invalid.", + data.inject.address); ret = -EINVAL; break; } @@ -307,8 +437,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * /* umc ce/ue error injection for a bad page is not allowed */ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && amdgpu_ras_check_bad_page(adev, data.inject.address)) { - DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", - data.inject.address); + dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has " + "already been marked as bad!\n", + data.inject.address); break; } @@ -318,7 +449,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * default: ret = -EINVAL; break; - }; + } if (ret) return -EINVAL; @@ -342,15 +473,25 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * * will reset EEPROM table to 0 entries. * */ -static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) +static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, + const char __user *buf, + size_t size, loff_t *pos) { - struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_device *adev = + (struct amdgpu_device *)file_inode(f)->i_private; int ret; - ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + ret = amdgpu_ras_eeprom_reset_table( + &(amdgpu_ras_get_context(adev)->eeprom_control)); - return ret == 1 ? size : -EIO; + if (!ret) { + /* Something was written to EEPROM. 
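+	 * The table is empty again, so also drop back to the default
+	 * RAS flags before reporting success.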
+ */ + amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; + return size; + } else { + return ret; + } } static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { @@ -396,12 +537,19 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, .head = obj->head, }; - if (amdgpu_ras_error_query(obj->adev, &info)) + if (!amdgpu_ras_get_error_query_ready(obj->adev)) + return sysfs_emit(buf, "Query currently inaccessible\n"); + + if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n", - "ue", info.ue_count, - "ce", info.ce_count); + if (obj->adev->asic_type == CHIP_ALDEBARAN) { + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) + DRM_WARN("Failed to reset error counter and error status"); + } + + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count); } /* obj begin */ @@ -411,11 +559,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, static inline void put_obj(struct ras_manager *obj) { - if (obj && --obj->use == 0) + if (obj && (--obj->use == 0)) list_del(&obj->node); - if (obj && obj->use < 0) { - DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); - } + if (obj && (obj->use < 0)) + DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head)); } /* make one obj and return it. */ @@ -425,13 +572,20 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - if (!con) + if (!adev->ras_enabled || !con) return NULL; if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; + /* already exist. return obj?
*/ if (alive_obj(obj)) return NULL; @@ -445,33 +599,35 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, } /* return an obj equal to head, or the first when head is NULL */ -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_common_if *head) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; int i; - if (!con) + if (!adev->ras_enabled || !con) return NULL; if (head) { if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; - if (alive_obj(obj)) { - WARN_ON(head->block != obj->head.block); + if (alive_obj(obj)) return obj; - } } else { - for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { obj = &con->objs[i]; - if (alive_obj(obj)) { - WARN_ON(i != obj->head.block); + if (alive_obj(obj)) return obj; - } } } @@ -481,11 +637,9 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, /* feature ctl begin */ static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, - struct ras_common_if *head) + struct ras_common_if *head) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - - return con->hw_supported & BIT(head->block); + return adev->ras_hw_enabled & BIT(head->block); } static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, @@ -514,8 +668,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_feature_allowed(adev, head)) return 0; - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (enable) { if (!obj) { @@ -542,19 +694,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - union ta_ras_cmd_input info; + union ta_ras_cmd_input *info; int ret; if (!con) return -EINVAL; + info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); + if (!info) + return -ENOMEM; + if (!enable) { - info.disable_features = (struct ta_ras_disable_features_input) { + info->disable_features = (struct ta_ras_disable_features_input) { .block_id = amdgpu_ras_block_to_ta(head->block), .error_type = amdgpu_ras_error_to_ta(head->type), }; } else { - info.enable_features = (struct ta_ras_enable_features_input) { + info->enable_features = (struct ta_ras_enable_features_input) { .block_id = amdgpu_ras_block_to_ta(head->block), .error_type = amdgpu_ras_error_to_ta(head->type), }; @@ -562,27 +718,24 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, /* Do not enable if it is not allowed. */ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); - /* Are we alerady in that state we are going to set? */ - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (!amdgpu_ras_intr_triggered()) { - ret = psp_ras_enable_features(&adev->psp, &info, enable); + ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", - enable ? 
"enable":"disable", - ras_block_str(head->block), - ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - return -EAGAIN; - return -EINVAL; + dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", + enable ? "enable":"disable", + get_ras_block_str(head), + amdgpu_ras_is_poison_mode_supported(adev), ret); + goto out; } } /* setup the obj */ __amdgpu_ras_feature_enable(adev, head, enable); - - return 0; + ret = 0; +out: + kfree(info); + return ret; } /* Only used in device probe stage and called only once. */ @@ -611,8 +764,9 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret == -EINVAL) { ret = __amdgpu_ras_feature_enable(adev, head, 1); if (!ret) - DRM_INFO("RAS INFO: %s setup object\n", - ras_block_str(head->block)); + dev_info(adev->dev, + "RAS INFO: %s setup object\n", + get_ras_block_str(head)); } } else { /* setup the object then issue a ras TA disable cmd.*/ @@ -620,7 +774,15 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret) return ret; + /* gfx block ras dsiable cmd must send to ras-ta */ + if (head->block == AMDGPU_RAS_BLOCK__GFX) + con->features |= BIT(head->block); + ret = amdgpu_ras_feature_enable(adev, head, 0); + + /* clean gfx block ras features flag */ + if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) + con->features &= ~BIT(head->block); } } else ret = amdgpu_ras_feature_enable(adev, head, enable); @@ -654,18 +816,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, bool bypass) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; int i; - const enum amdgpu_ras_error_type default_ras_type = - AMDGPU_RAS_ERROR__NONE; + const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; - for (i = 0; i < ras_block_count; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { struct ras_common_if head = { .block = i, .type = default_ras_type, .sub_block_index = 0, }; - strcpy(head.name, ras_block_str(i)); + + if (i == AMDGPU_RAS_BLOCK__MCA) + continue; + + if (bypass) { + /* + * bypass psp. vbios enable ras for us. + * so just create the obj + */ + if (__amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } else { + if (amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } + } + + for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__MCA, + .type = default_ras_type, + .sub_block_index = i, + }; + if (bypass) { /* * bypass psp. vbios enable ras for us. 
@@ -683,37 +866,97 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ + +void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) +{ + switch (ras_block->sub_block_index) { + case AMDGPU_RAS_MCA_BLOCK__MP0: + if (adev->mca.mp0.ras_funcs && + adev->mca.mp0.ras_funcs->query_ras_error_count) + adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MP1: + if (adev->mca.mp1.ras_funcs && + adev->mca.mp1.ras_funcs->query_ras_error_count) + adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MPIO: + if (adev->mca.mpio.ras_funcs && + adev->mca.mpio.ras_funcs->query_ras_error_count) + adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + default: + break; + } +} + /* query/inject/cure begin */ -int amdgpu_ras_error_query(struct amdgpu_device *adev, - struct ras_query_if *info) +int amdgpu_ras_query_error_status(struct amdgpu_device *adev, + struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; + int i; if (!obj) return -EINVAL; switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, &err_data); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, &err_data); /* umc query_ras_error_address is also responsible for clearing * error status */ - if (adev->umc.funcs->query_ras_error_address) - adev->umc.funcs->query_ras_error_address(adev, &err_data); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address) + adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__SDMA: + if (adev->sdma.funcs->query_ras_error_count) { + for (i = 0; i < adev->sdma.num_instances; i++) + adev->sdma.funcs->query_ras_error_count(adev, i, + &err_data); + } break; case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, &err_data); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_count) + adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); + + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_status) + adev->gfx.ras_funcs->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub.funcs->query_ras_error_count) - adev->mmhub.funcs->query_ras_error_count(adev, &err_data); + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_count) + adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_status) + adev->mmhub.ras_funcs->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__PCIE_BIF: - if (adev->nbio.funcs->query_ras_error_count) - adev->nbio.funcs->query_ras_error_count(adev, &err_data); + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->query_ras_error_count) + adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + if (adev->gmc.xgmi.ras_funcs && + adev->gmc.xgmi.ras_funcs->query_ras_error_count) + adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->query_ras_error_count) + 
adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__MCA: + amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); break; default: break; @@ -726,17 +969,117 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ce_count = obj->err_data.ce_count; if (err_data.ce_count) { - dev_info(adev->dev, "%ld correctable errors detected in %s block\n", - obj->err_data.ce_count, ras_block_str(info->head.block)); + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + obj->err_data.ce_count, + get_ras_block_str(&info->head)); + } else { + dev_info(adev->dev, "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + obj->err_data.ce_count, + get_ras_block_str(&info->head)); + } } if (err_data.ue_count) { - dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", - obj->err_data.ue_count, ras_block_str(info->head.block)); + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld uncorrectable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + obj->err_data.ue_count, + get_ras_block_str(&info->head)); + } else { + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in %s block\n", + obj->err_data.ue_count, + get_ras_block_str(&info->head)); + } } + if (!amdgpu_persistent_edc_harvesting_supported(adev)) + amdgpu_ras_reset_error_status(adev, info->head.block); + return 0; } +int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, + enum amdgpu_ras_block block) +{ + if (!amdgpu_ras_is_supported(adev, block)) + return -EINVAL; + + switch (block) { + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->reset_ras_error_count) + adev->gfx.ras_funcs->reset_ras_error_count(adev); + + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->reset_ras_error_status) + adev->gfx.ras_funcs->reset_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__MMHUB: + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_count) + adev->mmhub.ras_funcs->reset_ras_error_count(adev); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_status) + adev->mmhub.ras_funcs->reset_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__SDMA: + if (adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); + break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->reset_ras_error_count) + adev->hdp.ras_funcs->reset_ras_error_count(adev); + break; + default: + break; + } + + return 0; +} + +/* Trigger XGMI/WAFL error */ +static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, + struct ta_ras_trigger_error_input *block_info) +{ + int ret; + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + + if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) + dev_warn(adev->dev, "Failed to disallow XGMI power down"); + + ret = psp_ras_trigger_error(&adev->psp, block_info); + + if (amdgpu_ras_intr_triggered()) + return ret; + + if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) + 
dev_warn(adev->dev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + dev_warn(adev->dev, "Failed to allow df cstate"); + + return ret; +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -754,64 +1097,94 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1) { + block_info.address = + amdgpu_xgmi_get_relative_phy_addr(adev, + block_info.address); + } + switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.funcs->ras_error_inject) - ret = adev->gfx.funcs->ras_error_inject(adev, info); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->ras_error_inject) + ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); else ret = -EINVAL; break; case AMDGPU_RAS_BLOCK__UMC: + case AMDGPU_RAS_BLOCK__SDMA: case AMDGPU_RAS_BLOCK__MMHUB: - case AMDGPU_RAS_BLOCK__XGMI_WAFL: case AMDGPU_RAS_BLOCK__PCIE_BIF: + case AMDGPU_RAS_BLOCK__MCA: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + ret = amdgpu_ras_error_inject_xgmi(adev, &block_info); + break; default: - DRM_INFO("%s error injection is not supported yet\n", - ras_block_str(info->head.block)); + dev_info(adev->dev, "%s error injection is not supported yet\n", + get_ras_block_str(&info->head)); ret = -EINVAL; } if (ret) - DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", - ras_block_str(info->head.block), - ret); + dev_err(adev->dev, "ras inject %s failed %d\n", + get_ras_block_str(&info->head), ret); return ret; } -int amdgpu_ras_error_cure(struct amdgpu_device *adev, - struct ras_cure_if *info) -{ - /* psp fw has no cure interface for now. */ - return 0; -} - -/* get the total error counts on all IPs */ -unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, - bool is_ce) +/** + * amdgpu_ras_query_error_count -- Get error counts of all IPs + * adev: pointer to AMD GPU device + * ce_count: pointer to an integer to be set to the count of correctible errors. + * ue_count: pointer to an integer to be set to the count of uncorrectible + * errors. + * + * If set, @ce_count or @ue_count, count and return the corresponding + * error counts in those integer pointers. Return 0 if the device + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. + */ +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - struct ras_err_data data = {0, 0}; + unsigned long ce, ue; - if (!con) + if (!adev->ras_enabled || !con) + return -EOPNOTSUPP; + + /* Don't count since no reporting. + */ + if (!ce_count && !ue_count) return 0; + ce = 0; + ue = 0; list_for_each_entry(obj, &con->head, node) { struct ras_query_if info = { .head = obj->head, }; + int res; - if (amdgpu_ras_error_query(adev, &info)) - return 0; + res = amdgpu_ras_query_error_status(adev, &info); + if (res) + return res; - data.ce_count += info.ce_count; - data.ue_count += info.ue_count; + ce += info.ce_count; + ue += info.ue_count; } - return is_ce ? 
data.ce_count : data.ue_count; + if (ce_count) + *ce_count = ce; + + if (ue_count) + *ue_count = ue; + + return 0; } /* query/inject/cure end */ @@ -831,7 +1204,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags) case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return "F"; - }; + } } /** @@ -905,45 +1278,13 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); } -static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) +static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct attribute *attrs[] = { - &con->features_attr.attr, - NULL - }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; - struct attribute_group group = { - .name = "ras", - .attrs = attrs, - .bin_attrs = bin_attrs, - }; - - con->features_attr = (struct device_attribute) { - .attr = { - .name = "features", - .mode = S_IRUGO, - }, - .show = amdgpu_ras_sysfs_features_read, - }; - - con->badpages_attr = (struct bin_attribute) { - .attr = { - .name = "gpu_vram_bad_pages", - .mode = S_IRUGO, - }, - .size = 0, - .private = NULL, - .read = amdgpu_ras_sysfs_badpages_read, - }; - sysfs_attr_init(attrs[0]); - sysfs_bin_attr_init(bin_attrs[0]); - - return sysfs_create_group(&adev->dev->kobj, &group); + sysfs_remove_file_from_group(&adev->dev->kobj, + &con->badpages_attr.attr, + RAS_FS_NAME); } static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) @@ -953,14 +1294,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) &con->features_attr.attr, NULL }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; struct attribute_group group = { - .name = "ras", + .name = RAS_FS_NAME, .attrs = attrs, - .bin_attrs = bin_attrs, }; sysfs_remove_group(&adev->dev->kobj, &group); @@ -993,7 +1329,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, if (sysfs_add_file_to_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras")) { + RAS_FS_NAME)) { put_obj(obj); return -EINVAL; } @@ -1013,7 +1349,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, sysfs_remove_file_from_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras"); + RAS_FS_NAME); obj->attr_inuse = 0; put_obj(obj); @@ -1029,6 +1365,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_sysfs_remove(adev, &obj->head); } + if (amdgpu_bad_page_threshold != 0) + amdgpu_ras_sysfs_remove_bad_page_node(adev); + amdgpu_ras_sysfs_remove_feature_node(adev); return 0; @@ -1054,16 +1393,27 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) * */ /* debugfs begin */ -static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) +static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct drm_minor *minor = adev->ddev->primary; - - con->dir = debugfs_create_dir("ras", minor->debugfs_root); - debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_ctrl_ops); - debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_eeprom_ops); + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *dir; + + dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); + debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_ctrl_ops); + 
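/* the remaining nodes expose the EEPROM (reset, size, table), the bad page threshold and the read-only RAS enablement masks */ +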
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_ops); + debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, + &con->bad_page_cnt_threshold); + debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); + debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); + debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_size_ops); + con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", + S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_table_ops); + amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); /* * After one uncorrectable error happens, usually GPU recovery will @@ -1073,17 +1423,24 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine * will never be called. */ - debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, - &con->reboot); + debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); + + /* + * User could set this not to clean up hardware's error count register + * of RAS IPs during ras recovery. + */ + debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, + &con->disable_ras_err_cnt_harvest); + return dir; } -void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, - struct ras_fs_if *head) +static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, + struct ras_fs_if *head, + struct dentry *dir) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); - if (!obj || obj->ent) + if (!obj || !dir) return; get_obj(obj); @@ -1092,51 +1449,94 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, head->debugfs_name, sizeof(obj->fs_data.debugfs_name)); - obj->ent = debugfs_create_file(obj->fs_data.debugfs_name, - S_IWUGO | S_IRUGO, con->dir, obj, - &amdgpu_ras_debugfs_ops); + debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, + obj, &amdgpu_ras_debugfs_ops); } -void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, - struct ras_common_if *head) +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) { - struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct dentry *dir; + struct ras_manager *obj; + struct ras_fs_if fs_info; - if (!obj || !obj->ent) + /* + * it won't be called in resume path, no need to check + * suspend and gpu reset status + */ + if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) return; - debugfs_remove(obj->ent); - obj->ent = NULL; - put_obj(obj); -} + dir = amdgpu_ras_debugfs_create_ctrl_node(adev); -static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) -{ - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_manager *obj, *tmp; - - list_for_each_entry_safe(obj, tmp, &con->head, node) { - amdgpu_ras_debugfs_remove(adev, &obj->head); + list_for_each_entry(obj, &con->head, node) { + if (amdgpu_ras_is_supported(adev, obj->head.block) && + (obj->attr_inuse == 1)) { + sprintf(fs_info.debugfs_name, "%s_err_inject", + get_ras_block_str(&obj->head)); + fs_info.head = obj->head; + amdgpu_ras_debugfs_create(adev, &fs_info, dir); + } } - - debugfs_remove_recursive(con->dir); - con->dir = NULL; } + /* debugfs end */ /* ras fs */ - +static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, + amdgpu_ras_sysfs_badpages_read, NULL, 0); +static DEVICE_ATTR(features, S_IRUGO, + amdgpu_ras_sysfs_features_read, NULL); static int 
amdgpu_ras_fs_init(struct amdgpu_device *adev) { - amdgpu_ras_sysfs_create_feature_node(adev); - amdgpu_ras_debugfs_create_ctrl_node(adev); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct attribute_group group = { + .name = RAS_FS_NAME, + }; + struct attribute *attrs[] = { + &con->features_attr.attr, + NULL + }; + struct bin_attribute *bin_attrs[] = { + NULL, + NULL, + }; + int r; + + /* add features entry */ + con->features_attr = dev_attr_features; + group.attrs = attrs; + sysfs_attr_init(attrs[0]); + + if (amdgpu_bad_page_threshold != 0) { + /* add bad_page_features entry */ + bin_attr_gpu_vram_bad_pages.private = NULL; + con->badpages_attr = bin_attr_gpu_vram_bad_pages; + bin_attrs[0] = &con->badpages_attr; + group.bin_attrs = bin_attrs; + sysfs_bin_attr_init(bin_attrs[0]); + } + + r = sysfs_create_group(&adev->dev->kobj, &group); + if (r) + dev_err(adev->dev, "Failed to create RAS sysfs group!"); return 0; } static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) { - amdgpu_ras_debugfs_remove_all(adev); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *con_obj, *ip_obj, *tmp; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + list_for_each_entry_safe(con_obj, tmp, &con->head, node) { + ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); + if (ip_obj) + put_obj(ip_obj); + } + } + amdgpu_ras_sysfs_remove_all(adev); return 0; } @@ -1159,22 +1559,28 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) data->rptr = (data->aligned_element_size + data->rptr) % data->ring_size; - /* Let IP handle its data, maybe we need get the output - * from the callback to udpate the error type/count, etc - */ if (data->cb) { - ret = data->cb(obj->adev, &err_data, &entry); - /* ue will trigger an interrupt, and in that case - * we need do a reset to recovery the whole system. - * But leave IP do that recovery, here we just dispatch - * the error. - */ - if (ret == AMDGPU_RAS_SUCCESS) { - /* these counts could be left as 0 if - * some blocks do not count error number + if (amdgpu_ras_is_poison_mode_supported(obj->adev) && + obj->head.block == AMDGPU_RAS_BLOCK__UMC) + dev_info(obj->adev->dev, + "Poison is created, no user action is needed.\n"); + else { + /* Let the IP handle its data; maybe we need to get the output + * from the callback to update the error type/count, etc. */ - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; + ret = data->cb(obj->adev, &err_data, &entry); + /* A UE will trigger an interrupt, and in that case + * we need to do a reset to recover the whole system. + * But leave it to the IP to do that recovery; here we + * just dispatch the error. + */ + if (ret == AMDGPU_RAS_SUCCESS) { + /* these counts could be left as 0 if + * some blocks do not count error number + */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + } } } } @@ -1294,6 +1700,74 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) } /* ih end */ +/* traverse all IPs except NBIO to query error counters */ +static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + + if (!adev->ras_enabled || !con) + return; + + list_for_each_entry(obj, &con->head, node) { + struct ras_query_if info = { + .head = obj->head, + }; + + /* + * The PCIE_BIF IP has its own ISR for the RAS controller + * interrupt; the specific RAS counter query will be + * done in that ISR. 
So skip this block in the common + sync-flood interrupt ISR path. + */ + if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) + continue; + + amdgpu_ras_query_error_status(adev, &info); + } +} + +/* Parse RdRspStatus and WrRspStatus */ +static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, + struct ras_query_if *info) +{ + /* + * Only two blocks need to query the read/write + * RspStatus at the current state + */ + switch (info->head.block) { + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_status) + adev->gfx.ras_funcs->query_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__MMHUB: + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_status) + adev->mmhub.ras_funcs->query_ras_error_status(adev); + break; + default: + break; + } +} + +static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + + if (!adev->ras_enabled || !con) + return; + + list_for_each_entry(obj, &con->head, node) { + struct ras_query_if info = { + .head = obj->head, + }; + + amdgpu_ras_error_status_query(adev, &info); + } +} + /* recovery begin */ /* return 0 on success. @@ -1305,7 +1779,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; int i = 0; - int ret = 0; + int ret = 0, status; if (!con || !con->eh_data || !bps || !count) return -EINVAL; @@ -1314,6 +1788,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, data = con->eh_data; if (!data || data->count == 0) { *bps = NULL; + ret = -EINVAL; goto out; } @@ -1329,10 +1804,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, .size = AMDGPU_GPU_PAGE_SIZE, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; - - if (data->last_reserved <= i) + status = amdgpu_vram_mgr_query_page_status( + ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + data->bps[i].retired_page); + if (status == -EBUSY) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; - else if (data->bps_bo[i] == NULL) + else if (status == -ENOENT) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } @@ -1346,8 +1823,33 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) { struct amdgpu_ras *ras = container_of(work, struct amdgpu_ras, recovery_work); + struct amdgpu_device *remote_adev = NULL; + struct amdgpu_device *adev = ras->adev; + struct list_head device_list, *device_list_handle = NULL; + + if (!ras->disable_ras_err_cnt_harvest) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + + /* Build list of devices to query RAS related errors */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { + device_list_handle = &hive->device_list; + } else { + INIT_LIST_HEAD(&device_list); + list_add_tail(&adev->gmc.xgmi.head, &device_list); + device_list_handle = &device_list; + } + + list_for_each_entry(remote_adev, + device_list_handle, gmc.xgmi.head) { + amdgpu_ras_query_err_status(remote_adev); + amdgpu_ras_log_on_err_counter(remote_adev); + } - amdgpu_device_gpu_recover(ras->adev, 0); + amdgpu_put_xgmi_hive(hive); + } + + if (amdgpu_device_should_recover_gpu(ras->adev)) + amdgpu_device_gpu_recover(ras->adev, NULL); atomic_set(&ras->in_recovery, 0); } @@ -1359,12 +1861,9 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, unsigned int new_space = old_space + pages; unsigned int align_space = ALIGN(new_space, 512); void *bps = kmalloc(align_space * 
sizeof(*data->bps), GFP_KERNEL); - struct amdgpu_bo **bps_bo = - kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); - if (!bps || !bps_bo) { + if (!bps) { kfree(bps); - kfree(bps_bo); return -ENOMEM; } @@ -1373,14 +1872,8 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, data->count * sizeof(*data->bps)); kfree(data->bps); } - if (data->bps_bo) { - memcpy(bps_bo, data->bps_bo, - data->count * sizeof(*data->bps_bo)); - kfree(data->bps_bo); - } data->bps = bps; - data->bps_bo = bps_bo; data->space_left += align_space - old_space; return 0; } @@ -1392,6 +1885,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; int ret = 0; + uint32_t i; if (!con || !con->eh_data || !bps || pages <= 0) return 0; @@ -1401,16 +1895,26 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, if (!data) goto out; - if (data->space_left <= pages) - if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) { + for (i = 0; i < pages; i++) { + if (amdgpu_ras_check_bad_page_unlock(con, + bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) + continue; + + if (!data->space_left && + amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { ret = -ENOMEM; goto out; } - memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); - data->count += pages; - data->space_left -= pages; + amdgpu_vram_mgr_reserve_range( + ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE); + memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); + data->count++; + data->space_left--; + } out: mutex_unlock(&con->recovery_lock); @@ -1421,7 +1925,7 @@ out: * write error record array to eeprom, the function should be * protected by recovery_lock */ -static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) +int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; @@ -1433,17 +1937,19 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) control = &con->eeprom_control; data = con->eh_data; - save_count = data->count - control->num_recs; + save_count = data->count - control->ras_num_recs; /* only new entries are saved */ - if (save_count > 0) - if (amdgpu_ras_eeprom_process_recods(control, - &data->bps[control->num_recs], - true, - save_count)) { - DRM_ERROR("Failed to save EEPROM table data!"); + if (save_count > 0) { + if (amdgpu_ras_eeprom_append(control, + &data->bps[control->ras_num_recs], + save_count)) { + dev_err(adev->dev, "Failed to save EEPROM table data!"); return -EIO; } + dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + return 0; } @@ -1454,148 +1960,120 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras_eeprom_control *control = - &adev->psp.ras.ras->eeprom_control; - struct eeprom_table_record *bps = NULL; - int ret = 0; + &adev->psp.ras_context.ras->eeprom_control; + struct eeprom_table_record *bps; + int ret; /* no bad page record, skip eeprom access */ - if (!control->num_recs) - return ret; + if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) + return 0; - bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); + bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); if (!bps) return -ENOMEM; - if (amdgpu_ras_eeprom_process_recods(control, bps, false, - 
control->num_recs)) { - DRM_ERROR("Failed to load EEPROM table records!"); - ret = -EIO; - goto out; - } - - ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); + ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); + if (ret) + dev_err(adev->dev, "Failed to load EEPROM table records!"); + else + ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); -out: kfree(bps); return ret; } -/* - * check if an address belongs to bad page - * - * Note: this check is only for umc block - */ -static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, +static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, uint64_t addr) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; + struct ras_err_handler_data *data = con->eh_data; int i; - bool ret = false; - - if (!con || !con->eh_data) - return ret; - - mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; addr >>= AMDGPU_GPU_PAGE_SHIFT; for (i = 0; i < data->count; i++) - if (addr == data->bps[i].retired_page) { - ret = true; - goto out; - } + if (addr == data->bps[i].retired_page) + return true; -out: - mutex_unlock(&con->recovery_lock); - return ret; + return false; } -/* called in gpu recovery/init */ -int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) +/* + * check if an address belongs to bad page + * + * Note: this check is only for umc block + */ +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; - uint64_t bp; - struct amdgpu_bo *bo = NULL; - int i, ret = 0; + bool ret = false; if (!con || !con->eh_data) - return 0; + return ret; mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; - /* reserve vram at driver post stage. */ - for (i = data->last_reserved; i < data->count; i++) { - bp = data->bps[i].retired_page; - - /* There are two cases of reserve error should be ignored: - * 1) a ras bad page has been allocated (used by someone); - * 2) a ras bad page has been reserved (duplicate error injection - * for one page); - */ - if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, - AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL)) - DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); - - data->bps_bo[i] = bo; - data->last_reserved = i + 1; - bo = NULL; - } - - /* continue to save bad pages to eeprom even reesrve_vram fails */ - ret = amdgpu_ras_save_bad_pages(adev); -out: + ret = amdgpu_ras_check_bad_page_unlock(con, addr); mutex_unlock(&con->recovery_lock); return ret; } -/* called when driver unload */ -static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) +static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, + uint32_t max_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; - struct amdgpu_bo *bo; - int i; - - if (!con || !con->eh_data) - return 0; - mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; - - for (i = data->last_reserved - 1; i >= 0; i--) { - bo = data->bps_bo[i]; + /* + * Justification of the bad_page_cnt_threshold value in the ras + * structure. + * + * Generally, -1 <= amdgpu_bad_page_threshold <= max record length + * in eeprom, which introduces two scenarios: + * + * Bad page retirement enablement: + * - If amdgpu_bad_page_threshold = -1, + * bad_page_cnt_threshold = a typical value computed by the + * formula below. 
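+ * (For illustration, assuming RAS_BAD_PAGE_COVER represents one bad + * page per 100 MiB of VRAM: a 64 GiB board would then get a + * threshold of about 64 * 1024 / 100 ~= 655, capped at max_count.) 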
+ * + * - When the value from the user satisfies 0 < amdgpu_bad_page_threshold < + * the max record length in eeprom, use it directly. + * + * Bad page retirement disablement: + * - If amdgpu_bad_page_threshold = 0, bad page retirement + * functionality is disabled, and bad_page_cnt_threshold will + * have no effect. + */ - amdgpu_bo_free_kernel(&bo, NULL, NULL); + if (amdgpu_bad_page_threshold < 0) { + u64 val = adev->gmc.mc_vram_size; - data->bps_bo[i] = bo; - data->last_reserved = i; + do_div(val, RAS_BAD_PAGE_COVER); + con->bad_page_cnt_threshold = min(lower_32_bits(val), + max_count); + } else { + con->bad_page_cnt_threshold = min_t(int, max_count, + amdgpu_bad_page_threshold); } -out: - mutex_unlock(&con->recovery_lock); - return 0; } int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data **data; + u32 max_eeprom_records_count = 0; + bool exc_err_limit = false; int ret; - if (con) - data = &con->eh_data; - else + if (!con) + return 0; + + /* Allow access to the RAS EEPROM via debugfs, when the ASIC + * supports RAS and debugfs is enabled, even when + * adev->ras_enabled is unset, i.e. when the "ras_enable" + * module parameter is set to 0. + */ + con->adev = adev; + + if (!adev->ras_enabled) return 0; + data = &con->eh_data; *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); if (!*data) { ret = -ENOMEM; goto out; } @@ -1605,32 +2083,55 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) mutex_init(&con->recovery_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); - con->adev = adev; - ret = amdgpu_ras_eeprom_init(&con->eeprom_control); - if (ret) + max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); + amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); + + /* TODO: During testing, the SMU might fail to read the eeprom through I2C + * when the GPU is pending an XGMI reset at probe time + * (mostly after a second bus reset); skip it for now + */ + if (adev->gmc.xgmi.pending_reset) + return 0; + ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); + /* + * This call fails when exc_err_limit is true or + * ret != 0. + */ + if (exc_err_limit || ret) goto free; - if (con->eeprom_control.num_recs) { + if (con->eeprom_control.ras_num_recs) { ret = amdgpu_ras_load_bad_pages(adev); if (ret) goto free; + + if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) + adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); } +#ifdef CONFIG_X86_MCE_AMD + if ((adev->asic_type == CHIP_ALDEBARAN) && + (adev->gmc.xgmi.connected_to_cpu)) + amdgpu_register_bad_pages_mca_notifier(adev); +#endif return 0; -release: - amdgpu_ras_release_bad_pages(adev); free: kfree((*data)->bps); - kfree((*data)->bps_bo); kfree(*data); con->eh_data = NULL; out: - DRM_WARN("Failed to initialize ras recovery!\n"); + dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); + + /* + * Except for the error-threshold-exceeded case, other failure cases + * in this function do not fail amdgpu driver init. 
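+ * For example, if the EEPROM already holds more bad-page records than + * bad_page_cnt_threshold, amdgpu_ras_eeprom_init() is expected to set + * exc_err_limit, and init then does fail with the -EINVAL below. 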
+ */ + if (!exc_err_limit) + ret = 0; + else + ret = -EINVAL; return ret; } @@ -1645,12 +2146,10 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) return 0; cancel_work_sync(&con->recovery_work); - amdgpu_ras_release_bad_pages(adev); mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); - kfree(data->bps_bo); kfree(data); mutex_unlock(&con->recovery_lock); @@ -1658,17 +2157,32 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) } /* recovery end */ -/* return 0 if ras will reset gpu and repost.*/ -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block) +static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + return adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_ALDEBARAN || + adev->asic_type == CHIP_SIENNA_CICHLID; +} - if (!ras) - return -EINVAL; +/* + * This is a workaround for the Vega20 workstation SKU: force enable + * GFX RAS and ignore the VBIOS GFX RAS flag, because GC EDC cannot + * be written. + */ +static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; - ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; - return 0; + if (!ctx) + return; + + if (strnstr(ctx->vbios_version, "D16406", + sizeof(ctx->vbios_version)) || + strnstr(ctx->vbios_version, "D36002", + sizeof(ctx->vbios_version))) + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); } /* @@ -1680,49 +2194,111 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, * we have to initialize ras as normal. but need check if operation is * allowed or not in each function. */ -static void amdgpu_ras_check_supported(struct amdgpu_device *adev, - uint32_t *hw_supported, uint32_t *supported) +static void amdgpu_ras_check_supported(struct amdgpu_device *adev) { - *hw_supported = 0; - *supported = 0; + adev->ras_hw_enabled = adev->ras_enabled = 0; - if (amdgpu_sriov_vf(adev) || - adev->asic_type != CHIP_VEGA20) + if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || + !amdgpu_ras_asic_supported(adev)) return; - if (adev->is_atom_fw && - (amdgpu_atomfirmware_mem_ecc_supported(adev) || - amdgpu_atomfirmware_sram_ecc_supported(adev))) - *hw_supported = AMDGPU_RAS_BLOCK_MASK; + if (!adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { + dev_info(adev->dev, "MEM ECC is active.\n"); + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else { + dev_info(adev->dev, "MEM ECC is not present.\n"); + } - *supported = amdgpu_ras_enable == 0 ? - 0 : *hw_supported & amdgpu_ras_mask; + if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { + dev_info(adev->dev, "SRAM ECC is active.\n"); + adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else { + dev_info(adev->dev, "SRAM ECC is not present.\n"); + } + } else { + /* the driver only manages the RAS feature of a few IP blocks + * when the GPU is connected to the CPU through XGMI */ + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | + 1 << AMDGPU_RAS_BLOCK__SDMA | + 1 << AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_get_quirks(adev); + + /* hw_supported needs to be aligned with RAS block mask. */ + adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; + + adev->ras_enabled = amdgpu_ras_enable == 0 ? 
0 : + adev->ras_hw_enabled & amdgpu_ras_mask; +} + +static void amdgpu_ras_counte_dw(struct work_struct *work) +{ + struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, + ras_counte_delay_work.work); + struct amdgpu_device *adev = con->adev; + struct drm_device *dev = adev_to_drm(adev); + unsigned long ce_count, ue_count; + int res; + + res = pm_runtime_get_sync(dev->dev); + if (res < 0) + goto Out; + + /* Cache new values. + */ + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } + + pm_runtime_mark_last_busy(dev->dev); +Out: + pm_runtime_put_autosuspend(dev->dev); } int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int r; + bool df_poison, umc_poison; if (con) return 0; con = kmalloc(sizeof(struct amdgpu_ras) + - sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, + sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + + sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, GFP_KERNEL|__GFP_ZERO); if (!con) return -ENOMEM; + con->adev = adev; + INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); + atomic_set(&con->ras_ce_count, 0); + atomic_set(&con->ras_ue_count, 0); + con->objs = (struct ras_manager *)(con + 1); amdgpu_ras_set_context(adev, con); - amdgpu_ras_check_supported(adev, &con->hw_supported, - &con->supported); - if (!con->hw_supported) { - amdgpu_ras_set_context(adev, NULL); - kfree(con); - return 0; + amdgpu_ras_check_supported(adev); + + if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { + /* set gfx block ras context feature for VEGA20 Gaming + * send ras disable cmd to ras ta during ras late init. + */ + if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { + con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); + + return 0; + } + + r = 0; + goto release_con; } con->features = 0; @@ -1730,32 +2306,103 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* Might need get this flag from vbios. 
*/ con->flags = RAS_DEFAULT_FLAGS; - if (adev->nbio.funcs->init_ras_controller_interrupt) { - r = adev->nbio.funcs->init_ras_controller_interrupt(adev); + /* initialize nbio ras functions ahead of any other + * ras functions so the hardware fatal error interrupt + * can be enabled as early as possible */ + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: + if (!adev->gmc.xgmi.connected_to_cpu) + adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs; + break; + default: + /* nbio ras is not available */ + break; + } + + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->init_ras_controller_interrupt) { + r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev); if (r) - return r; + goto release_con; } - if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { - r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) { + r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev); if (r) - return r; + goto release_con; } - amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; + /* Init the poison-supported flag; the default value is false */ + if (adev->df.funcs && + adev->df.funcs->query_ras_poison_mode && + adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_poison_mode) { + df_poison = + adev->df.funcs->query_ras_poison_mode(adev); + umc_poison = + adev->umc.ras_funcs->query_ras_poison_mode(adev); + /* Only if poison is set in both DF and UMC can we support it */ + if (df_poison && umc_poison) + con->poison_supported = true; + else if (df_poison != umc_poison) + dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", + df_poison, umc_poison); + } - if (amdgpu_ras_fs_init(adev)) - goto fs_out; + if (amdgpu_ras_fs_init(adev)) { + r = -EINVAL; + goto release_con; + } + + dev_info(adev->dev, "RAS INFO: ras initialized successfully, " + "hardware ability[%x] ras_mask[%x]\n", + adev->ras_hw_enabled, adev->ras_enabled); - DRM_INFO("RAS INFO: ras initialized successfully, " - "hardware ability[%x] ras_mask[%x]\n", - con->hw_supported, con->supported); return 0; -fs_out: +release_con: amdgpu_ras_set_context(adev, NULL); kfree(con); - return -EINVAL; + return r; +} + +int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) +{ + if (adev->gmc.xgmi.connected_to_cpu) + return 1; + return 0; +} + +static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, + struct ras_common_if *ras_block) +{ + struct ras_query_if info = { + .head = *ras_block, + }; + + if (!amdgpu_persistent_edc_harvesting_supported(adev)) + return 0; + + if (amdgpu_ras_query_error_status(adev, &info) != 0) + DRM_WARN("RAS init harvest failure"); + + if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) + DRM_WARN("RAS init harvest reset failure"); + + return 0; +} + +bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!con) + return false; + + return con->poison_supported; } /* helper function to handle common stuff in ip late init phase */ @@ -1764,6 +2411,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, struct ras_fs_if *fs_info, struct ras_ih_if *ih_info) { + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + unsigned long ue_count, ce_count; int r; /* disable RAS feature per IP block if it is not supported */ @@ -1774,12 +2423,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); if 
(r) { - if (r == -EAGAIN) { - /* request gpu reset. will run again */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } else if (adev->in_suspend || adev->in_gpu_reset) { + if (adev->in_suspend || amdgpu_in_reset(adev)) { /* in resume phase, if fail to enable ras, * clean up all ras fs nodes, and disable ras */ goto cleanup; @@ -1787,8 +2431,11 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, return r; } + /* check for errors on warm reset for ASICs with persistent EDC support */ + amdgpu_persistent_edc_harvesting(adev, ras_block); + /* in resume phase, no need to create ras fs node */ - if (adev->in_suspend || adev->in_gpu_reset) + if (adev->in_suspend || amdgpu_in_reset(adev)) return 0; if (ih_info->cb) { r = amdgpu_ras_interrupt_add_handler(adev, ih_info); if (r) goto interrupt; } - amdgpu_ras_debugfs_create(adev, fs_info); - r = amdgpu_ras_sysfs_create(adev, fs_info); if (r) goto sysfs; + /* These are the cached values at init. + */ + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } + return 0; cleanup: amdgpu_ras_sysfs_remove(adev, ras_block); sysfs: - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); interrupt: @@ -1824,9 +2475,8 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, return; amdgpu_ras_sysfs_remove(adev, ras_block); - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) - amdgpu_ras_interrupt_remove_handler(adev, ih_info); + amdgpu_ras_interrupt_remove_handler(adev, ih_info); amdgpu_ras_feature_enable(adev, ras_block, 0); } @@ -1838,8 +2488,12 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj, *tmp; - if (!con) + if (!adev->ras_enabled || !con) { + /* clean ras context for VEGA20 Gaming after sending the ras disable cmd */ + amdgpu_release_ras_context(adev); + return; + } if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { /* Set up all other IPs which are not implemented. There is a @@ -1861,26 +2515,13 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) } } } - - if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { - con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; - /* setup ras obj state as disabled. - * for init_by_vbios case. - * if we want to enable ras, just enable it in a normal way. - * If we want do disable it, need setup ras obj as enabled, - * then issue another TA disable cmd. 
- * See feature_enable_on_boot - */ - amdgpu_ras_disable_all_features(adev, 1); - amdgpu_ras_reset_gpu(adev, 0); - } } void amdgpu_ras_suspend(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!con) + if (!adev->ras_enabled || !con) return; amdgpu_ras_disable_all_features(adev, 0); @@ -1894,9 +2535,10 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!con) + if (!adev->ras_enabled || !con) return 0; + /* Need disable ras on all IPs here before ip [hw/sw]fini */ amdgpu_ras_disable_all_features(adev, 0); amdgpu_ras_recovery_fini(adev); @@ -1907,7 +2549,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!con) + if (!adev->ras_enabled || !con) return 0; amdgpu_ras_fs_fini(adev); @@ -1918,6 +2560,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) if (con->features) amdgpu_ras_disable_all_features(adev, 1); + cancel_delayed_work_sync(&con->ras_counte_delay_work); + amdgpu_ras_set_context(adev, NULL); kfree(con); @@ -1926,15 +2570,172 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { - uint32_t hw_supported, supported; - - amdgpu_ras_check_supported(adev, &hw_supported, &supported); - if (!hw_supported) + amdgpu_ras_check_supported(adev); + if (!adev->ras_hw_enabled) return; if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { - DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); + dev_info(adev->dev, "uncorrectable hardware error " + "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + + amdgpu_ras_reset_gpu(adev); + } +} + +bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_VEGA20 && + adev->pm.fw_version <= 0x283400) { + return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && + amdgpu_ras_intr_triggered(); + } + + return false; +} + +void amdgpu_release_ras_context(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!con) + return; + + if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { + con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); + amdgpu_ras_set_context(adev, NULL); + kfree(con); + } +} + +#ifdef CONFIG_X86_MCE_AMD +static struct amdgpu_device *find_adev(uint32_t node_id) +{ + int i; + struct amdgpu_device *adev = NULL; + + for (i = 0; i < mce_adev_list.num_gpu; i++) { + adev = mce_adev_list.devs[i]; - amdgpu_ras_reset_gpu(adev, false); + if (adev && adev->gmc.xgmi.connected_to_cpu && + adev->gmc.xgmi.physical_node_id == node_id) + break; + adev = NULL; + } + + return adev; +} + +#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) +#define GET_UMC_INST(m) (((m) >> 21) & 0x7) +#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) +#define GPU_ID_OFFSET 8 + +static int amdgpu_bad_page_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct mce *m = (struct mce *)data; + struct amdgpu_device *adev = NULL; + uint32_t gpu_id = 0; + uint32_t umc_inst = 0; + uint32_t ch_inst, channel_index = 0; + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct eeprom_table_record err_rec; + uint64_t retired_page; + + /* + * If the error was generated in UMC_V2, which belongs to GPU UMCs, + * and the error occurred in DramECC (extended error code = 0), then + * process the error; otherwise bail out. 
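+ * (Worked example with a made-up IPID: if m->ipid were + * 0x0000960000351000, GET_MCA_IPID_GPUID() above would yield 9, so + * gpu_id = 9 - GPU_ID_OFFSET = 1, while GET_UMC_INST() would yield 1 + * and GET_CHAN_INDEX() would yield 5.) 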
*/ + if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) && + (XEC(m->status, 0x3f) == 0x0))) + return NOTIFY_DONE; + + /* + * If it is a correctable error, return. + */ + if (mce_is_correctable(m)) + return NOTIFY_OK; + + /* + * The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register. + */ + gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; + + adev = find_adev(gpu_id); + if (!adev) { + DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, + gpu_id); + return NOTIFY_DONE; + } + + /* + * If it is an uncorrectable error, find out the UMC instance and + * channel index. + */ + umc_inst = GET_UMC_INST(m->ipid); + ch_inst = GET_CHAN_INDEX(m->ipid); + + dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", + umc_inst, ch_inst); + + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); + + /* + * Translate the UMC channel address to a physical address + */ + channel_index = + adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + + ch_inst]; + + retired_page = ADDR_OF_8KB_BLOCK(m->addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(m->addr); + + err_rec.address = m->addr; + err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec.ts = (uint64_t)ktime_get_real_seconds(); + err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec.cu = 0; + err_rec.mem_channel = channel_index; + err_rec.mcumc_id = umc_inst; + + err_data.err_addr = &err_rec; + err_data.err_addr_cnt = 1; + + if (amdgpu_bad_page_threshold != 0) { + amdgpu_ras_add_bad_pages(adev, err_data.err_addr, + err_data.err_addr_cnt); + amdgpu_ras_save_bad_pages(adev); + } + + return NOTIFY_OK; +} + +static struct notifier_block amdgpu_bad_page_nb = { + .notifier_call = amdgpu_bad_page_notifier, + .priority = MCE_PRIO_UC, +}; + +static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) +{ + /* + * Add the adev to the mce_adev_list. + * During mode2 reset, the amdgpu device is temporarily + * removed from the mgpu_info list, which can cause + * page retirement to fail. + * Use this list instead of mgpu_info to find the amdgpu + * device on which the UMC error was reported. + */ + mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; + + /* + * Register the x86 notifier only once + * with the MCE subsystem. 
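+ * All GPUs in the system share one MCE decode chain, so a single + * notifier instance is sufficient; find_adev() above routes each + * reported event to the right device in mce_adev_list. 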
+ */ + if (notifier_registered == false) { + mce_register_decode_chain(&amdgpu_bad_page_nb); + notifier_registered = true; } } +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index f80fd3428c98..e36f4de9fa55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -31,6 +31,8 @@ #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" +#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) + enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__SDMA, @@ -46,11 +48,22 @@ enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__MP0, AMDGPU_RAS_BLOCK__MP1, AMDGPU_RAS_BLOCK__FUSE, + AMDGPU_RAS_BLOCK__MCA, AMDGPU_RAS_BLOCK__LAST }; +enum amdgpu_ras_mca_block { + AMDGPU_RAS_MCA_BLOCK__MP0 = 0, + AMDGPU_RAS_MCA_BLOCK__MP1, + AMDGPU_RAS_MCA_BLOCK__MPIO, + AMDGPU_RAS_MCA_BLOCK__IOHC, + + AMDGPU_RAS_MCA_BLOCK__LAST +}; + #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST +#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST #define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1) enum amdgpu_ras_gfx_subblock { @@ -303,23 +316,18 @@ struct ras_common_if { enum amdgpu_ras_block block; enum amdgpu_ras_error_type type; uint32_t sub_block_index; - /* block name */ char name[32]; }; struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. */ - uint32_t hw_supported; - /* for IP to check its ras ability. */ - uint32_t supported; uint32_t features; struct list_head head; - /* debugfs */ - struct dentry *dir; /* sysfs */ struct device_attribute features_attr; struct bin_attribute badpages_attr; + struct dentry *de_ras_eeprom_table; /* block array */ struct ras_manager *objs; @@ -334,6 +342,22 @@ struct amdgpu_ras { uint32_t flags; bool reboot; struct amdgpu_ras_eeprom_control eeprom_control; + + bool error_query_ready; + + /* bad page count threshold */ + uint32_t bad_page_cnt_threshold; + + /* disable ras error count harvest in recovery */ + bool disable_ras_err_cnt_harvest; + + /* is poison mode supported */ + bool poison_supported; + + /* RAS count errors delayed work */ + struct delayed_work ras_counte_delay_work; + atomic_t ras_ue_count; + atomic_t ras_ce_count; }; struct ras_fs_data { @@ -351,14 +375,10 @@ struct ras_err_data { struct ras_err_handler_data { /* point to bad page records array */ struct eeprom_table_record *bps; - /* point to reserved bo array */ - struct amdgpu_bo **bps_bo; /* the count of entries */ int count; /* the space can place new entries */ int space_left; - /* last reserved entry's index + 1 */ - int last_reserved; }; typedef int (*ras_ih_cb)(struct amdgpu_device *adev, @@ -388,8 +408,6 @@ struct ras_manager { struct list_head node; /* the device */ struct amdgpu_device *adev; - /* debugfs */ - struct dentry *ent; /* sysfs */ struct device_attribute sysfs_attr; int attr_inuse; @@ -412,7 +430,7 @@ struct ras_badpage { /* interfaces for IP */ struct ras_fs_if { struct ras_common_if head; - char sysfs_name[32]; + const char* sysfs_name; char debugfs_name[32]; }; @@ -464,8 +482,8 @@ struct ras_debug_if { * 8: feature disable */ -#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras) -#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con)) +#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras) +#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con)) /* check if ras is supported on block, say, sdma, gfx */ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, @@ -475,36 
+493,28 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, if (block >= AMDGPU_RAS_BLOCK_COUNT) return 0; - return ras && (ras->supported & (1 << block)); + return ras && (adev->ras_enabled & (1 << block)); } int amdgpu_ras_recovery_init(struct amdgpu_device *adev); -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); -unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, - bool is_ce); +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count); /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, struct eeprom_table_record *bps, int pages); -int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); +int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev); -static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, - bool is_baco) +static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - /* save bad page to eeprom before gpu reset, - * i2c may be unstable in gpu reset - */ - if (in_task()) - amdgpu_ras_reserve_bad_pages(adev); - if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) schedule_work(&ras->recovery_work); return 0; @@ -541,6 +551,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) { return TA_RAS_BLOCK__MP1; case AMDGPU_RAS_BLOCK__FUSE: return TA_RAS_BLOCK__FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return TA_RAS_BLOCK__MCA; default: WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); return TA_RAS_BLOCK__UMC; @@ -590,15 +602,14 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, struct ras_common_if *head); -void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, - struct ras_fs_if *head); - -void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, - struct ras_common_if *head); +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); -int amdgpu_ras_error_query(struct amdgpu_device *adev, +int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info); +int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, + enum amdgpu_ras_block block); + int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info); @@ -611,6 +622,9 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_dispatch_if *info); +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, + struct ras_common_if *head); + extern atomic_t amdgpu_ras_in_intr; static inline bool amdgpu_ras_intr_triggered(void) @@ -618,6 +632,23 @@ static inline bool amdgpu_ras_intr_triggered(void) return !!atomic_read(&amdgpu_ras_in_intr); } +static inline void amdgpu_ras_intr_cleared(void) +{ + atomic_set(&amdgpu_ras_in_intr, 0); +} + void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); +void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); + +bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev); + +void amdgpu_release_ras_context(struct amdgpu_device *adev); + +int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev); + +const char *get_ras_block_str(struct ras_common_if *ras_block); + +bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev); + #endif diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2a8e04895595..05117eda105b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -25,501 +25,1104 @@ #include "amdgpu.h" #include "amdgpu_ras.h" #include <linux/bits.h> -#include "smu_v11_0_i2c.h" - -#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 -#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 +#include "atom.h" +#include "amdgpu_eeprom.h" +#include "amdgpu_atomfirmware.h" +#include <linux/debugfs.h> +#include <linux/uaccess.h> + +#define EEPROM_I2C_MADDR_VEGA20 0x0 +#define EEPROM_I2C_MADDR_ARCTURUS 0x40000 +#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0 +#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0 +#define EEPROM_I2C_MADDR_ALDEBARAN 0x0 /* * The 2 macros bellow represent the actual size in bytes that * those entities occupy in the EEPROM memory. - * EEPROM_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which + * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which * uses uint64 to store 6b fields such as retired_page. */ -#define EEPROM_TABLE_HEADER_SIZE 20 -#define EEPROM_TABLE_RECORD_SIZE 24 - -#define EEPROM_ADDRESS_SIZE 0x2 +#define RAS_TABLE_HEADER_SIZE 20 +#define RAS_TABLE_RECORD_SIZE 24 /* Table hdr is 'AMDR' */ -#define EEPROM_TABLE_HDR_VAL 0x414d4452 -#define EEPROM_TABLE_VER 0x00010000 +#define RAS_TABLE_HDR_VAL 0x414d4452 +#define RAS_TABLE_VER 0x00010000 + +/* Bad GPU tag ‘BADG’ */ +#define RAS_TABLE_HDR_BAD 0x42414447 + +/* Assume 2-Mbit size EEPROM and take up the whole space. */ +#define RAS_TBL_SIZE_BYTES (256 * 1024) +#define RAS_TABLE_START 0 +#define RAS_HDR_START RAS_TABLE_START +#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE) +#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \ + / RAS_TABLE_RECORD_SIZE) + +/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM + * offset off of RAS_TABLE_START. That is, this is something you can + * add to control->i2c_address, and then tell I2C layer to read + * from/write to there. _N is the so called absolute index, + * because it starts right after the table header. + */ +#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \ + (_N) * RAS_TABLE_RECORD_SIZE) + +#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \ + (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE) -/* Assume 2 Mbit size */ -#define EEPROM_SIZE_BYTES 256000 -#define EEPROM_PAGE__SIZE_BYTES 256 -#define EEPROM_HDR_START 0 -#define EEPROM_RECORD_START (EEPROM_HDR_START + EEPROM_TABLE_HEADER_SIZE) -#define EEPROM_MAX_RECORD_NUM ((EEPROM_SIZE_BYTES - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE) -#define EEPROM_ADDR_MSB_MASK GENMASK(17, 8) +/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off + * of "fri", return the absolute record index off of the end of + * the table header. 
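+ * For example, with ras_fri == 6 and ras_max_record_count == 8, + * relative index 3 maps to absolute index (3 + 6) % 8 == 1. 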
+ */ +#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \ + (_C)->ras_max_record_count) + +#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE) #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev -static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr, - unsigned char *buff) +static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) { - uint32_t *pp = (uint32_t *) buff; - - pp[0] = cpu_to_le32(hdr->header); - pp[1] = cpu_to_le32(hdr->version); - pp[2] = cpu_to_le32(hdr->first_rec_offset); - pp[3] = cpu_to_le32(hdr->tbl_size); - pp[4] = cpu_to_le32(hdr->checksum); + return adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_SIENNA_CICHLID || + adev->asic_type == CHIP_ALDEBARAN; } -static void __decode_table_header_from_buff(struct amdgpu_ras_eeprom_table_header *hdr, - unsigned char *buff) +static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev, + struct amdgpu_ras_eeprom_control *control) { - uint32_t *pp = (uint32_t *)buff; + struct atom_context *atom_ctx = adev->mode_info.atom_context; - hdr->header = le32_to_cpu(pp[0]); - hdr->version = le32_to_cpu(pp[1]); - hdr->first_rec_offset = le32_to_cpu(pp[2]); - hdr->tbl_size = le32_to_cpu(pp[3]); - hdr->checksum = le32_to_cpu(pp[4]); + if (!control || !atom_ctx) + return false; + + if (strnstr(atom_ctx->vbios_version, + "D342", + sizeof(atom_ctx->vbios_version))) + control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342; + else + control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS; + + return true; } -static int __update_table_header(struct amdgpu_ras_eeprom_control *control, - unsigned char *buff) +static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev, + struct amdgpu_ras_eeprom_control *control) { - int ret = 0; - struct i2c_msg msg = { - .addr = 0, - .flags = 0, - .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, - .buf = buff, - }; + u8 i2c_addr; + if (!control) + return false; - *(uint16_t *)buff = EEPROM_HDR_START; - __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE); - - msg.addr = control->i2c_address; + if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) { + /* The address given by VBIOS is an 8-bit, wire-format + * address, i.e. the most significant byte. + * + * Normalize it to a 19-bit EEPROM address. Remove the + * device type identifier and make it a 7-bit address; + * then make it a 19-bit EEPROM address. See top of + * amdgpu_eeprom.c. 
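+ * For example, the wire-format byte 0xA8 (the old + * EEPROM_I2C_TARGET_ADDR_ARCTURUS) normalizes to + * ((0xA8 & 0x0F) >> 1) << 16 == 0x40000, matching + * EEPROM_I2C_MADDR_ARCTURUS above, while 0xA0 maps to 0x0. 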
+ */ + i2c_addr = (i2c_addr & 0x0F) >> 1; + control->i2c_address = ((u32) i2c_addr) << 16; - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); - if (ret < 1) - DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret); + return true; + } - return ret; -} + switch (adev->asic_type) { + case CHIP_VEGA20: + control->i2c_address = EEPROM_I2C_MADDR_VEGA20; + break; + case CHIP_ARCTURUS: + return __get_eeprom_i2c_addr_arct(adev, control); + case CHIP_SIENNA_CICHLID: + control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID; + break; -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) -{ - int i; - uint32_t tbl_sum = 0; + case CHIP_ALDEBARAN: + control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN; + break; - /* Header checksum, skip checksum field in the calculation */ - for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) - tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); + default: + return false; + } - return tbl_sum; + return true; } -static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, - int num) +static void +__encode_table_header_to_buf(struct amdgpu_ras_eeprom_table_header *hdr, + unsigned char *buf) { - int i, j; - uint32_t tbl_sum = 0; + u32 *pp = (uint32_t *)buf; - /* Records checksum */ - for (i = 0; i < num; i++) { - struct eeprom_table_record *record = &records[i]; + pp[0] = cpu_to_le32(hdr->header); + pp[1] = cpu_to_le32(hdr->version); + pp[2] = cpu_to_le32(hdr->first_rec_offset); + pp[3] = cpu_to_le32(hdr->tbl_size); + pp[4] = cpu_to_le32(hdr->checksum); +} - for (j = 0; j < sizeof(*record); j++) { - tbl_sum += *(((unsigned char *)record) + j); - } - } +static void +__decode_table_header_from_buf(struct amdgpu_ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; - return tbl_sum; + hdr->header = le32_to_cpu(pp[0]); + hdr->version = le32_to_cpu(pp[1]); + hdr->first_rec_offset = le32_to_cpu(pp[2]); + hdr->tbl_size = le32_to_cpu(pp[3]); + hdr->checksum = le32_to_cpu(pp[4]); } -static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) +static int __write_table_header(struct amdgpu_ras_eeprom_control *control) { - return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); + u8 buf[RAS_TABLE_HEADER_SIZE]; + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; + + memset(buf, 0, sizeof(buf)); + __encode_table_header_to_buf(&control->tbl_hdr, buf); + + /* i2c may be unstable in gpu reset */ + down_read(&adev->reset_sem); + res = amdgpu_eeprom_write(&adev->pm.smu_i2c, + control->i2c_address + + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + up_read(&adev->reset_sem); + + if (res < 0) { + DRM_ERROR("Failed to write EEPROM table header:%d", res); + } else if (res < RAS_TABLE_HEADER_SIZE) { + DRM_ERROR("Short write:%d out of %d\n", + res, RAS_TABLE_HEADER_SIZE); + res = -EIO; + } else { + res = 0; + } + + return res; } -/* Checksum = 256 -((sum of all table entries) mod 256) */ -static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num, - uint32_t old_hdr_byte_sum) +static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control) { - /* - * This will update the table sum with new records. - * - * TODO: What happens when the EEPROM table is to be wrapped around - * and old records from start will get overridden. 
- */ + int ii; + u8 *pp, csum; + size_t sz; - /* need to recalculate updated header byte sum */ - control->tbl_byte_sum -= old_hdr_byte_sum; - control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); + /* Header checksum, skip checksum field in the calculation */ + sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); + pp = (u8 *) &control->tbl_hdr; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; - control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); + return csum; } -/* table sum mod 256 + checksum must equals 256 */ -static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) +static int amdgpu_ras_eeprom_correct_header_tag( + struct amdgpu_ras_eeprom_control *control, + uint32_t header) { - control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); - - if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { - DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); - return false; - } - - return true; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + u8 *hh; + int res; + u8 csum; + + csum = -hdr->checksum; + + hh = (void *) &hdr->header; + csum -= (hh[0] + hh[1] + hh[2] + hh[3]); + hh = (void *) &header; + csum += hh[0] + hh[1] + hh[2] + hh[3]; + csum = -csum; + mutex_lock(&control->ras_tbl_mutex); + hdr->header = header; + hdr->checksum = csum; + res = __write_table_header(control); + mutex_unlock(&control->ras_tbl_mutex); + + return res; } +/** + * amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table + * @control: pointer to control structure + * + * Reset the contents of the header of the RAS EEPROM table. + * Return 0 on success, -errno on error. + */ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) { - unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; - int ret = 0; + u8 csum; + int res; - mutex_lock(&control->tbl_mutex); + mutex_lock(&control->ras_tbl_mutex); - hdr->header = EEPROM_TABLE_HDR_VAL; - hdr->version = EEPROM_TABLE_VER; - hdr->first_rec_offset = EEPROM_RECORD_START; - hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; + hdr->header = RAS_TABLE_HDR_VAL; + hdr->version = RAS_TABLE_VER; + hdr->first_rec_offset = RAS_RECORD_START; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE; - control->tbl_byte_sum = 0; - __update_tbl_checksum(control, NULL, 0, 0); - control->next_addr = EEPROM_RECORD_START; + csum = __calc_hdr_byte_sum(control); + csum = -csum; + hdr->checksum = csum; + res = __write_table_header(control); - ret = __update_table_header(control, buff); + control->ras_num_recs = 0; + control->ras_fri = 0; - mutex_unlock(&control->tbl_mutex); + amdgpu_ras_debugfs_set_ret_size(control); - return ret; + mutex_unlock(&control->ras_tbl_mutex); + return res; } -int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) +static void +__encode_table_record_to_buf(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, + unsigned char *buf) { - int ret = 0; - struct amdgpu_device *adev = to_amdgpu_device(control); - unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; - struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; - struct i2c_msg msg = { - .addr = 0, - .flags = I2C_M_RD, - .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, - .buf = buff, - }; + __le64 tmp = 0; + int i = 0; - mutex_init(&control->tbl_mutex); + /* Next 
are all record fields according to EEPROM page spec in LE foramt */ + buf[i++] = record->err_type; - switch (adev->asic_type) { - case CHIP_VEGA20: - control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20; - ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); - break; + buf[i++] = record->bank; - case CHIP_ARCTURUS: - control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS; - ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); - break; + tmp = cpu_to_le64(record->ts); + memcpy(buf + i, &tmp, 8); + i += 8; - default: - return 0; - } + tmp = cpu_to_le64((record->offset & 0xffffffffffff)); + memcpy(buf + i, &tmp, 6); + i += 6; - if (ret) { - DRM_ERROR("Failed to init I2C controller, ret:%d", ret); - return ret; - } + buf[i++] = record->mem_channel; + buf[i++] = record->mcumc_id; - msg.addr = control->i2c_address; + tmp = cpu_to_le64((record->retired_page & 0xffffffffffff)); + memcpy(buf + i, &tmp, 6); +} - /* Read/Create table header from EEPROM address 0 */ - ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); - if (ret < 1) { - DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret); - return ret; - } +static void +__decode_table_record_from_buf(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, + unsigned char *buf) +{ + __le64 tmp = 0; + int i = 0; - __decode_table_header_from_buff(hdr, &buff[2]); + /* Next are all record fields according to EEPROM page spec in LE foramt */ + record->err_type = buf[i++]; - if (hdr->header == EEPROM_TABLE_HDR_VAL) { - control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) / - EEPROM_TABLE_RECORD_SIZE; - control->tbl_byte_sum = __calc_hdr_byte_sum(control); - control->next_addr = EEPROM_RECORD_START; + record->bank = buf[i++]; - DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", - control->num_recs); + memcpy(&tmp, buf + i, 8); + record->ts = le64_to_cpu(tmp); + i += 8; - } else { - DRM_INFO("Creating new EEPROM table"); + memcpy(&tmp, buf + i, 6); + record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); + i += 6; - ret = amdgpu_ras_eeprom_reset_table(control); - } + record->mem_channel = buf[i++]; + record->mcumc_id = buf[i++]; - return ret == 1 ? 
0 : -EIO; + memcpy(&tmp, buf + i, 6); + record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff); } -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) +bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - switch (adev->asic_type) { - case CHIP_VEGA20: - smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); - break; - case CHIP_ARCTURUS: - smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); - break; + if (!__is_ras_eeprom_supported(adev)) + return false; - default: - return; + /* skip check eeprom table for VEGA20 Gaming */ + if (!con) + return false; + else + if (!(con->features & BIT(AMDGPU_RAS_BLOCK__UMC))) + return false; + + if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) { + dev_warn(adev->dev, "This GPU is in BAD status."); + dev_warn(adev->dev, "Please retire it or set a larger " + "threshold value when reloading driver.\n"); + return true; } + + return false; } -static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *record, - unsigned char *buff) +/** + * __amdgpu_ras_eeprom_write -- write indexed from buffer to EEPROM + * @control: pointer to control structure + * @buf: pointer to buffer containing data to write + * @fri: start writing at this index + * @num: number of records to write + * + * The caller must hold the table mutex in @control. + * Return 0 on success, -errno otherwise. + */ +static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) { - __le64 tmp = 0; - int i = 0; + struct amdgpu_device *adev = to_amdgpu_device(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + down_read(&adev->reset_sem); + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = amdgpu_eeprom_write(&adev->pm.smu_i2c, + control->i2c_address + + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + up_read(&adev->reset_sem); + if (res < 0) { + DRM_ERROR("Writing %d EEPROM table records error:%d", + num, res); + } else if (res < buf_size) { + /* Short write, return error. + */ + DRM_ERROR("Wrote %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); + res = -EIO; + } else { + res = 0; + } - /* Next are all record fields according to EEPROM page spec in LE foramt */ - buff[i++] = record->err_type; + return res; +} - buff[i++] = record->bank; +static int +amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, + const u32 num) +{ + u32 a, b, i; + u8 *buf, *pp; + int res; - tmp = cpu_to_le64(record->ts); - memcpy(buff + i, &tmp, 8); - i += 8; + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; - tmp = cpu_to_le64((record->offset & 0xffffffffffff)); - memcpy(buff + i, &tmp, 6); - i += 6; + /* Encode all of them in one go. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) + __encode_table_record_to_buf(control, &record[i], pp); + + /* a, first record index to write into. + * b, last record index to write into. + * a = first index to read (fri) + number of records in the table, + * b = a + @num - 1. + * Let N = control->ras_max_num_record_count, then we have, + * case 0: 0 <= a <= b < N, + * just append @num records starting at a; + * case 1: 0 <= a < N <= b, + * append (N - a) records starting at a, and + * append the remainder, b % N + 1, starting at 0. 
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases, + * case 2a: 0 <= a <= b < N + * append num records starting at a; and fix fri if b overwrote it, + * and since a <= b, if b overwrote it then a must've also, + * and if b didn't overwrite it, then a didn't also. + * case 2b: 0 <= b < a < N + * write num records starting at a, which wraps around 0=N + * and overwrite fri unconditionally. Now from case 2a, + * this means that b eclipsed fri to overwrite it and wrap + * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally + * set fri = b + 1 (mod N). + * Now, since fri is updated in every case, except the trivial case 0, + * the number of records present in the table after writing, is, + * num_recs - 1 = b - fri (mod N), and we take the positive value, + * by adding an arbitrary multiple of N before taking the modulo N + * as shown below. + */ + a = control->ras_fri + control->ras_num_recs; + b = a + num - 1; + if (b < control->ras_max_record_count) { + res = __amdgpu_ras_eeprom_write(control, buf, a, num); + } else if (a < control->ras_max_record_count) { + u32 g0, g1; + + g0 = control->ras_max_record_count - a; + g1 = b % control->ras_max_record_count + 1; + res = __amdgpu_ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __amdgpu_ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; + if (g1 > control->ras_fri) + control->ras_fri = g1 % control->ras_max_record_count; + } else { + a %= control->ras_max_record_count; + b %= control->ras_max_record_count; + + if (a <= b) { + /* Note that, b - a + 1 = num. */ + res = __amdgpu_ras_eeprom_write(control, buf, a, num); + if (res) + goto Out; + if (b >= control->ras_fri) + control->ras_fri = (b + 1) % control->ras_max_record_count; + } else { + u32 g0, g1; + + /* b < a, which means, we write from + * a to the end of the table, and from + * the start of the table to b. + */ + g0 = control->ras_max_record_count - a; + g1 = b + 1; + res = __amdgpu_ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __amdgpu_ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; + control->ras_fri = g1 % control->ras_max_record_count; + } + } + control->ras_num_recs = 1 + (control->ras_max_record_count + b + - control->ras_fri) + % control->ras_max_record_count; +Out: + kfree(buf); + return res; +} - buff[i++] = record->mem_channel; - buff[i++] = record->mcumc_id; +static int +amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + u8 *buf, *pp, csum; + u32 buf_size; + int res; - tmp = cpu_to_le64((record->retired_page & 0xffffffffffff)); - memcpy(buff + i, &tmp, 6); + /* Modify the header if it exceeds. 
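The case analysis above reduces to a short piece of modular arithmetic. Below is a minimal userspace sketch of the same scheme, with an in-memory array standing in for the EEPROM and illustrative names: it computes the first and last slot written, wraps modulo the capacity N, and recomputes fri and the record count with the same formula amdgpu_ras_eeprom_append_table() uses.

#include <stdio.h>

#define N 8 /* table capacity; stands in for ras_max_record_count */

static int table[N];        /* stands in for the EEPROM record area */
static unsigned int fri;    /* first (oldest) record index */
static unsigned int nrecs;  /* number of valid records */

/* Append @num records (1 <= num <= N, as the driver also enforces),
 * wrapping around the end of the table and advancing @fri when the
 * oldest records are overwritten.
 */
static void append(const int *src, unsigned int num)
{
	unsigned int a = (fri + nrecs) % N;  /* first slot written */
	unsigned int b = (a + num - 1) % N;  /* last slot written */
	unsigned int i;

	for (i = 0; i < num; i++)
		table[(a + i) % N] = src[i];

	if (nrecs + num > N)        /* overwrote the oldest entries */
		fri = (b + 1) % N;
	nrecs = 1 + (N + b - fri) % N; /* same formula as the driver */
}

int main(void)
{
	int recs[5] = { 1, 2, 3, 4, 5 };

	append(recs, 5); /* fri=0, nrecs=5 */
	append(recs, 5); /* wraps: fri=2, nrecs=8 */
	printf("fri=%u nrecs=%u\n", fri, nrecs);
	return 0;
}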
+ */ + if (amdgpu_bad_page_threshold != 0 && + control->ras_num_recs >= ras->bad_page_cnt_threshold) { + dev_warn(adev->dev, + "Saved bad pages %d reaches threshold value %d\n", + control->ras_num_recs, ras->bad_page_cnt_threshold); + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + } + + control->tbl_hdr.version = RAS_TABLE_VER; + control->tbl_hdr.first_rec_offset = RAS_INDEX_TO_OFFSET(control, control->ras_fri); + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + control->tbl_hdr.checksum = 0; + + buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) { + DRM_ERROR("allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); + res = -ENOMEM; + goto Out; + } + + down_read(&adev->reset_sem); + res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + control->i2c_address + + control->ras_record_offset, + buf, buf_size); + up_read(&adev->reset_sem); + if (res < 0) { + DRM_ERROR("EEPROM failed reading records:%d\n", + res); + goto Out; + } else if (res < buf_size) { + DRM_ERROR("EEPROM read %d out of %d bytes\n", + res, buf_size); + res = -EIO; + goto Out; + } + + /* Recalc the checksum. + */ + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; + + csum += __calc_hdr_byte_sum(control); + /* avoid sign extension when assigning to "checksum" */ + csum = -csum; + control->tbl_hdr.checksum = csum; + res = __write_table_header(control); +Out: + kfree(buf); + return res; } -static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *record, - unsigned char *buff) +/** + * amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table + * @control: pointer to control structure + * @record: array of records to append + * @num: number of records in @record array + * + * Append @num records to the table, calculate the checksum and write + * the table back to EEPROM. The maximum number of records that + * can be appended is between 1 and control->ras_max_record_count, + * regardless of how many records are already stored in the table. + * + * Return 0 on success or if EEPROM is not supported, -errno on error. 
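The checksum discipline that amdgpu_ras_eeprom_update_header() applies above is a byte-sum complement: the stored checksum is chosen so that every byte of the table, header, records and checksum included, sums to zero mod 256. That is why verification only has to add everything up and compare against zero. A standalone sketch of the invariant:

#include <stdint.h>
#include <stdio.h>

/* Writer side: pick the checksum byte so the byte sum of the whole
 * table, checksum included, is 0 mod 256.
 */
static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)-sum; /* two's complement of the running sum */
}

/* Reader side: sum everything, checksum byte included; 0 == intact. */
static int verify(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return sum; /* 0 if correct, non-zero otherwise */
}

int main(void)
{
	uint8_t tbl[32] = { 0xAA, 0x55, 0x11 }; /* toy table; last byte is the checksum */

	tbl[31] = calc_checksum(tbl, 31);
	printf("verify: %d\n", verify(tbl, 32)); /* prints 0 */
	tbl[2] ^= 0x01;                          /* corrupt one byte */
	printf("verify: %d\n", verify(tbl, 32)); /* non-zero */
	return 0;
}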
+ */ +int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, + const u32 num) { - __le64 tmp = 0; - int i = 0; + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; - /* Next are all record fields according to EEPROM page spec in LE foramt */ - record->err_type = buff[i++]; + if (!__is_ras_eeprom_supported(adev)) + return 0; - record->bank = buff[i++]; + if (num == 0) { + DRM_ERROR("will not append 0 records\n"); + return -EINVAL; + } else if (num > control->ras_max_record_count) { + DRM_ERROR("cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); + return -EINVAL; + } - memcpy(&tmp, buff + i, 8); - record->ts = le64_to_cpu(tmp); - i += 8; + mutex_lock(&control->ras_tbl_mutex); - memcpy(&tmp, buff + i, 6); - record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); - i += 6; + res = amdgpu_ras_eeprom_append_table(control, record, num); + if (!res) + res = amdgpu_ras_eeprom_update_header(control); + if (!res) + amdgpu_ras_debugfs_set_ret_size(control); - record->mem_channel = buff[i++]; - record->mcumc_id = buff[i++]; + mutex_unlock(&control->ras_tbl_mutex); + return res; +} - memcpy(&tmp, buff + i, 6); - record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff); +/** + * __amdgpu_ras_eeprom_read -- read indexed from EEPROM into buffer + * @control: pointer to control structure + * @buf: pointer to buffer to read into + * @fri: first record index, start reading at this index, absolute index + * @num: number of records to read + * + * The caller must hold the table mutex in @control. + * Return 0 on success, -errno otherwise. + */ +static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + down_read(&adev->reset_sem); + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + control->i2c_address + + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + up_read(&adev->reset_sem); + if (res < 0) { + DRM_ERROR("Reading %d EEPROM table records error:%d", + num, res); + } else if (res < buf_size) { + /* Short read, return error. + */ + DRM_ERROR("Read %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); + res = -EIO; + } else { + res = 0; + } + + return res; } -/* - * When reaching end of EEPROM memory jump back to 0 record address - * When next record access will go beyond EEPROM page boundary modify bits A17/A8 - * in I2C selector to go to next page +/** + * amdgpu_ras_eeprom_read -- read EEPROM + * @control: pointer to control structure + * @record: array of records to read into + * @num: number of records in @record + * + * Reads num records from the RAS table in EEPROM and + * writes the data into @record array. + * + * Returns 0 on success, -errno on error. 
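For reference, the 24-byte on-EEPROM record layout implemented by __encode_table_record_to_buf() and __decode_table_record_from_buf() earlier in this file can be exercised in isolation. The sketch below (userspace, illustrative names) packs and unpacks the same little-endian fields, with offsets and retired pages truncated to 48 bits as in the driver:

#include <stdint.h>
#include <stdio.h>

#define RECORD_SIZE 24 /* 1+1+8+6+1+1+6 bytes, matching RAS_TABLE_RECORD_SIZE */

struct record {
	uint8_t  err_type;
	uint8_t  bank;
	uint64_t ts;
	uint64_t offset;       /* low 48 bits only */
	uint8_t  mem_channel;
	uint8_t  mcumc_id;
	uint64_t retired_page; /* low 48 bits only */
};

/* Store the low @n bytes of @v at @p, least significant byte first. */
static void put_le(uint8_t *p, uint64_t v, int n)
{
	int i;

	for (i = 0; i < n; i++)
		p[i] = v >> (8 * i);
}

static uint64_t get_le(const uint8_t *p, int n)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < n; i++)
		v |= (uint64_t)p[i] << (8 * i);
	return v;
}

static void pack(const struct record *r, uint8_t buf[RECORD_SIZE])
{
	buf[0] = r->err_type;
	buf[1] = r->bank;
	put_le(buf + 2, r->ts, 8);
	put_le(buf + 10, r->offset & 0xffffffffffffULL, 6);
	buf[16] = r->mem_channel;
	buf[17] = r->mcumc_id;
	put_le(buf + 18, r->retired_page & 0xffffffffffffULL, 6);
}

static void unpack(struct record *r, const uint8_t buf[RECORD_SIZE])
{
	r->err_type = buf[0];
	r->bank = buf[1];
	r->ts = get_le(buf + 2, 8);
	r->offset = get_le(buf + 10, 6);
	r->mem_channel = buf[16];
	r->mcumc_id = buf[17];
	r->retired_page = get_le(buf + 18, 6);
}

int main(void)
{
	struct record in = { 2, 1, 0x1122334455667788ULL,
			     0xabcdef012345ULL, 3, 4, 0xfedcba987654ULL }, out;
	uint8_t buf[RECORD_SIZE];

	pack(&in, buf);
	unpack(&out, buf);
	printf("roundtrip %s\n", out.ts == in.ts &&
	       out.retired_page == in.retired_page ? "ok" : "broken");
	return 0;
}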
*/ -static uint32_t __correct_eeprom_dest_address(uint32_t curr_address) +int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *record, + const u32 num) { - uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE; + struct amdgpu_device *adev = to_amdgpu_device(control); + int i, res; + u8 *buf, *pp; + u32 g0, g1; + + if (!__is_ras_eeprom_supported(adev)) + return 0; - /* When all EEPROM memory used jump back to 0 address */ - if (next_address > EEPROM_SIZE_BYTES) { - DRM_INFO("Reached end of EEPROM memory, jumping to 0 " - "and overriding old record"); - return EEPROM_RECORD_START; + if (num == 0) { + DRM_ERROR("will not read 0 records\n"); + return -EINVAL; + } else if (num > control->ras_num_recs) { + DRM_ERROR("too many records to read:%d available:%d\n", + num, control->ras_num_recs); + return -EINVAL; } - /* - * To check if we overflow page boundary compare next address with - * current and see if bits 17/8 of the EEPROM address will change - * If they do start from the next 256b page + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Determine how many records to read, from the first record + * index, fri, to the end of the table, and from the beginning + * of the table, such that the total number of records is + * @num, and we handle wrap around when fri > 0 and + * fri + num > RAS_MAX_RECORD_COUNT. + * + * First we compute the index of the last element + * which would be fetched from each region, + * g0 is in [fri, fri + num - 1], and + * g1 is in [0, RAS_MAX_RECORD_COUNT - 1]. + * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of + * the last element to fetch, we set g0 to _the number_ + * of elements to fetch, @num, since we know that the last + * indexed to be fetched does not exceed the table. * - * https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2 + * If, however, g0 >= RAS_MAX_RECORD_COUNT, then + * we set g0 to the number of elements to read + * until the end of the table, and g1 to the number of + * elements to read from the beginning of the table. */ - if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) { - DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx", - (next_address & EEPROM_ADDR_MSB_MASK)); + g0 = control->ras_fri + num - 1; + g1 = g0 % control->ras_max_record_count; + if (g0 < control->ras_max_record_count) { + g0 = num; + g1 = 0; + } else { + g0 = control->ras_max_record_count - control->ras_fri; + g1 += 1; + } - return (next_address & EEPROM_ADDR_MSB_MASK); + mutex_lock(&control->ras_tbl_mutex); + res = __amdgpu_ras_eeprom_read(control, buf, control->ras_fri, g0); + if (res) + goto Out; + if (g1) { + res = __amdgpu_ras_eeprom_read(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; } - return curr_address; + res = 0; + + /* Read up everything? Then transform. 
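The two-segment split computed above can be captured in a few lines. This sketch (illustrative names) derives the same g0/g1 pair from the first record index, the request size and the table capacity:

#include <assert.h>
#include <stdio.h>

/* At most two contiguous reads cover any request: @g0 records from
 * @fri toward the end of the table, then @g1 records from index 0.
 */
static void read_split(unsigned int fri, unsigned int num, unsigned int cap,
		       unsigned int *g0, unsigned int *g1)
{
	unsigned int last = fri + num - 1;

	if (last < cap) { /* no wrap around */
		*g0 = num;
		*g1 = 0;
	} else {          /* wrap: [fri, cap-1], then [0, last % cap] */
		*g0 = cap - fri;
		*g1 = last % cap + 1;
	}
	assert(*g0 + *g1 == num);
}

int main(void)
{
	unsigned int g0, g1;

	read_split(6, 5, 8, &g0, &g1); /* wraps: 2 records, then 3 */
	printf("g0=%u g1=%u\n", g0, g1);
	return 0;
}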
+ */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) + __decode_table_record_from_buf(control, &record[i], pp); +Out: + kfree(buf); + mutex_unlock(&control->ras_tbl_mutex); + + return res; } -int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, - bool write, - int num) +uint32_t amdgpu_ras_eeprom_max_record_count(void) { - int i, ret = 0; - struct i2c_msg *msgs, *msg; - unsigned char *buffs, *buff; - struct eeprom_table_record *record; - struct amdgpu_device *adev = to_amdgpu_device(control); + return RAS_MAX_RECORD_COUNT; +} + +static ssize_t +amdgpu_ras_debugfs_eeprom_size_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL; + u8 data[50]; + int res; + + if (!size) + return size; + + if (!ras || !control) { + res = snprintf(data, sizeof(data), "Not supported\n"); + } else { + res = snprintf(data, sizeof(data), "%d bytes or %d records\n", + RAS_TBL_SIZE_BYTES, control->ras_max_record_count); + } - if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS) + if (*pos >= res) return 0; - buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE, - GFP_KERNEL); - if (!buffs) - return -ENOMEM; + res -= *pos; + res = min_t(size_t, res, size); - mutex_lock(&control->tbl_mutex); + if (copy_to_user(buf, &data[*pos], res)) + return -EFAULT; - msgs = kcalloc(num, sizeof(*msgs), GFP_KERNEL); - if (!msgs) { - ret = -ENOMEM; - goto free_buff; - } + *pos += res; + + return res; +} + +const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops = { + .owner = THIS_MODULE, + .read = amdgpu_ras_debugfs_eeprom_size_read, + .write = NULL, + .llseek = default_llseek, +}; + +static const char *tbl_hdr_str = " Signature Version FirstOffs Size Checksum\n"; +static const char *tbl_hdr_fmt = "0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n"; +#define tbl_hdr_fmt_size (5 * (2+8) + 4 + 1) +static const char *rec_hdr_str = "Index Offset ErrType Bank/CU TimeStamp Offs/Addr MemChl MCUMCID RetiredPage\n"; +static const char *rec_hdr_fmt = "%5d 0x%05X %7s 0x%02X 0x%016llX 0x%012llX 0x%02X 0x%02X 0x%012llX\n"; +#define rec_hdr_fmt_size (5 + 1 + 7 + 1 + 7 + 1 + 7 + 1 + 18 + 1 + 14 + 1 + 6 + 1 + 7 + 1 + 14 + 1) + +static const char *record_err_type_str[AMDGPU_RAS_EEPROM_ERR_COUNT] = { + "ignore", + "re", + "ue", +}; + +static loff_t amdgpu_ras_debugfs_table_size(struct amdgpu_ras_eeprom_control *control) +{ + return strlen(tbl_hdr_str) + tbl_hdr_fmt_size + + strlen(rec_hdr_str) + rec_hdr_fmt_size * control->ras_num_recs; +} - /* In case of overflow just start from beginning to not lose newest records */ - if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES)) - control->next_addr = EEPROM_RECORD_START; +void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_ras *ras = container_of(control, struct amdgpu_ras, + eeprom_control); + struct dentry *de = ras->de_ras_eeprom_table; + if (de) + d_inode(de)->i_size = amdgpu_ras_debugfs_table_size(control); +} - /* - * TODO Currently makes EEPROM writes for each record, this creates - * internal fragmentation. 
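Both debugfs read handlers in this file follow the same windowing pattern: format (or locate) the full text, then serve only the [*pos, *pos + size) slice of it on each call. A userspace analogue of that arithmetic, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Serve at most @size bytes of @data starting at offset @*pos,
 * advancing @*pos; returns the byte count, 0 at end-of-file.
 */
static long window_read(const char *data, long data_len,
			char *buf, long size, long *pos)
{
	long n;

	if (*pos >= data_len)
		return 0;

	n = data_len - *pos;
	if (n > size)
		n = size;
	memcpy(buf, data + *pos, n); /* copy_to_user() in the kernel */
	*pos += n;
	return n;
}

int main(void)
{
	const char *text = "256 bytes or 10 records\n";
	char buf[8];
	long pos = 0, n;

	while ((n = window_read(text, strlen(text), buf, sizeof(buf), &pos)))
		fwrite(buf, 1, n, stdout);
	return 0;
}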
Optimized the code to do full page write of - * 256b - */ - for (i = 0; i < num; i++) { - buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - record = &records[i]; - msg = &msgs[i]; +static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = &ras->eeprom_control; + const size_t orig_size = size; + int res = -EFAULT; + size_t data_len; - control->next_addr = __correct_eeprom_dest_address(control->next_addr); + mutex_lock(&control->ras_tbl_mutex); - /* - * Update bits 16,17 of EEPROM address in I2C address by setting them - * to bits 1,2 of Device address byte - */ - msg->addr = control->i2c_address | - ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15); - msg->flags = write ? 0 : I2C_M_RD; - msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE; - msg->buf = buff; - - /* Insert the EEPROM dest addess, bits 0-15 */ - buff[0] = ((control->next_addr >> 8) & 0xff); - buff[1] = (control->next_addr & 0xff); - - /* EEPROM table content is stored in LE format */ - if (write) - __encode_table_record_to_buff(control, record, buff + EEPROM_ADDRESS_SIZE); - - /* - * The destination EEPROM address might need to be corrected to account - * for page or entire memory wrapping - */ - control->next_addr += EEPROM_TABLE_RECORD_SIZE; + /* We want *pos - data_len > 0, which means there's + * bytes to be printed from data. + */ + data_len = strlen(tbl_hdr_str); + if (*pos < data_len) { + data_len -= *pos; + data_len = min_t(size_t, data_len, size); + if (copy_to_user(buf, &tbl_hdr_str[*pos], data_len)) + goto Out; + buf += data_len; + size -= data_len; + *pos += data_len; } - ret = i2c_transfer(&control->eeprom_accessor, msgs, num); - if (ret < 1) { - DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret); - - /* TODO Restore prev next EEPROM address ? 
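The fragmentation called out in the removed TODO above comes from issuing one I2C transaction per 24-byte record. Typical EEPROM parts only accept writes within a single page (256 bytes for this part), so the usual fix, and roughly what the new amdgpu_eeprom helpers do, is to chunk a buffer on page boundaries. A sketch of that chunking, with write_page() as a hypothetical transport callback:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_PAGE_SIZE 256

/* Issue @len bytes starting at EEPROM address @addr as a series of
 * writes, none of which crosses a page boundary. @write_page is a
 * hypothetical callback performing one bounded I2C write.
 */
static int write_buf(uint32_t addr, const uint8_t *buf, size_t len,
		     int (*write_page)(uint32_t, const uint8_t *, size_t))
{
	while (len) {
		size_t room = EEPROM_PAGE_SIZE - (addr % EEPROM_PAGE_SIZE);
		size_t n = len < room ? len : room;
		int ret = write_page(addr, buf, n);

		if (ret)
			return ret;
		addr += n;
		buf += n;
		len -= n;
	}
	return 0;
}

static int fake_write_page(uint32_t addr, const uint8_t *buf, size_t len)
{
	(void)buf;
	printf("write %zu bytes at 0x%x\n", len, (unsigned int)addr);
	return 0;
}

int main(void)
{
	uint8_t data[600] = { 0 };

	/* 0x1f0 is mid-page: emits 16, 256, 256 and 72 byte writes. */
	return write_buf(0x1f0, data, sizeof(data), fake_write_page);
}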
*/ - goto free_msgs; + data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size; + if (*pos < data_len && size > 0) { + u8 data[tbl_hdr_fmt_size + 1]; + loff_t lpos; + + snprintf(data, sizeof(data), tbl_hdr_fmt, + control->tbl_hdr.header, + control->tbl_hdr.version, + control->tbl_hdr.first_rec_offset, + control->tbl_hdr.tbl_size, + control->tbl_hdr.checksum); + + data_len -= *pos; + data_len = min_t(size_t, data_len, size); + lpos = *pos - strlen(tbl_hdr_str); + if (copy_to_user(buf, &data[lpos], data_len)) + goto Out; + buf += data_len; + size -= data_len; + *pos += data_len; } + data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size + strlen(rec_hdr_str); + if (*pos < data_len && size > 0) { + loff_t lpos; + + data_len -= *pos; + data_len = min_t(size_t, data_len, size); + lpos = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size; + if (copy_to_user(buf, &rec_hdr_str[lpos], data_len)) + goto Out; + buf += data_len; + size -= data_len; + *pos += data_len; + } - if (!write) { - for (i = 0; i < num; i++) { - buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - record = &records[i]; + data_len = amdgpu_ras_debugfs_table_size(control); + if (*pos < data_len && size > 0) { + u8 dare[RAS_TABLE_RECORD_SIZE]; + u8 data[rec_hdr_fmt_size + 1]; + struct eeprom_table_record record; + int s, r; - __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE); + /* Find the starting record index + */ + s = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size - + strlen(rec_hdr_str); + s = s / rec_hdr_fmt_size; + r = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size - + strlen(rec_hdr_str); + r = r % rec_hdr_fmt_size; + + for ( ; size > 0 && s < control->ras_num_recs; s++) { + u32 ai = RAS_RI_TO_AI(control, s); + /* Read a single record + */ + res = __amdgpu_ras_eeprom_read(control, dare, ai, 1); + if (res) + goto Out; + __decode_table_record_from_buf(control, &record, dare); + snprintf(data, sizeof(data), rec_hdr_fmt, + s, + RAS_INDEX_TO_OFFSET(control, ai), + record_err_type_str[record.err_type], + record.bank, + record.ts, + record.offset, + record.mem_channel, + record.mcumc_id, + record.retired_page); + + data_len = min_t(size_t, rec_hdr_fmt_size - r, size); + if (copy_to_user(buf, &data[r], data_len)) { + res = -EFAULT; + goto Out; + } + buf += data_len; + size -= data_len; + *pos += data_len; + r = 0; } } + res = 0; +Out: + mutex_unlock(&control->ras_tbl_mutex); + return res < 0 ? res : orig_size - size; +} + +static ssize_t +amdgpu_ras_debugfs_eeprom_table_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control = ras ? 
&ras->eeprom_control : NULL; + u8 data[81]; + int res; - if (write) { - uint32_t old_hdr_byte_sum = __calc_hdr_byte_sum(control); + if (!size) + return size; - /* - * Update table header with size and CRC and account for table - * wrap around where the assumption is that we treat it as empty - * table - * - * TODO - Check the assumption is correct - */ - control->num_recs += num; - control->num_recs %= EEPROM_MAX_RECORD_NUM; - control->tbl_hdr.tbl_size += EEPROM_TABLE_RECORD_SIZE * num; - if (control->tbl_hdr.tbl_size > EEPROM_SIZE_BYTES) - control->tbl_hdr.tbl_size = EEPROM_TABLE_HEADER_SIZE + - control->num_recs * EEPROM_TABLE_RECORD_SIZE; - - __update_tbl_checksum(control, records, num, old_hdr_byte_sum); - - __update_table_header(control, buffs); - } else if (!__validate_tbl_checksum(control, records, num)) { - DRM_WARN("EEPROM Table checksum mismatch!"); - /* TODO Uncomment when EEPROM read/write is relliable */ - /* ret = -EIO; */ + if (!ras || !control) { + res = snprintf(data, sizeof(data), "Not supported\n"); + if (*pos >= res) + return 0; + + res -= *pos; + res = min_t(size_t, res, size); + + if (copy_to_user(buf, &data[*pos], res)) + return -EFAULT; + + *pos += res; + + return res; + } else { + return amdgpu_ras_debugfs_table_read(f, buf, size, pos); } +} -free_msgs: - kfree(msgs); +const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops = { + .owner = THIS_MODULE, + .read = amdgpu_ras_debugfs_eeprom_table_read, + .write = NULL, + .llseek = default_llseek, +}; -free_buff: - kfree(buffs); +/** + * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum + * @control: pointer to control structure + * + * Check the checksum of the stored in EEPROM RAS table. + * + * Return 0 if the checksum is correct, + * positive if it is not correct, and + * -errno on I/O error. + */ +static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int buf_size, res; + u8 csum, *buf, *pp; + + buf_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + DRM_ERROR("Out of memory checking RAS table checksum.\n"); + return -ENOMEM; + } - mutex_unlock(&control->tbl_mutex); + res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + control->i2c_address + + control->ras_header_offset, + buf, buf_size); + if (res < buf_size) { + DRM_ERROR("Partial read for checksum, res:%d\n", res); + /* On partial reads, return -EIO. + */ + if (res >= 0) + res = -EIO; + goto Out; + } - return ret == num ? 0 : -EIO; + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; +Out: + kfree(buf); + return res < 0 ? 
res : csum; } -/* Used for testing if bugs encountered */ -#if 0 -void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control) +int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, + bool *exceed_err_limit) { - int i; - struct eeprom_table_record *recs = kcalloc(1, sizeof(*recs), GFP_KERNEL); + struct amdgpu_device *adev = to_amdgpu_device(control); + unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 }; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int res; + + *exceed_err_limit = false; - if (!recs) - return; + if (!__is_ras_eeprom_supported(adev)) + return 0; - for (i = 0; i < 1 ; i++) { - recs[i].address = 0xdeadbeef; - recs[i].retired_page = i; + /* Verify i2c adapter is initialized */ + if (!adev->pm.smu_i2c.algo) + return -ENOENT; + + if (!__get_eeprom_i2c_addr(adev, control)) + return -EINVAL; + + control->ras_header_offset = RAS_HDR_START; + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + mutex_init(&control->ras_tbl_mutex); + + /* Read the table header from EEPROM address */ + res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + control->i2c_address + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + if (res < RAS_TABLE_HEADER_SIZE) { + DRM_ERROR("Failed to read EEPROM table header, res:%d", res); + return res >= 0 ? -EIO : res; } - if (!amdgpu_ras_eeprom_process_recods(control, recs, true, 1)) { + __decode_table_header_from_buf(hdr, buf); - memset(recs, 0, sizeof(*recs) * 1); + control->ras_num_recs = RAS_NUM_RECS(hdr); + control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); - control->next_addr = EEPROM_RECORD_START; + if (hdr->header == RAS_TABLE_HDR_VAL) { + DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", + control->ras_num_recs); + res = __verify_ras_table_checksum(control); + if (res) + DRM_ERROR("RAS table incorrect checksum or error:%d\n", + res); - if (!amdgpu_ras_eeprom_process_recods(control, recs, false, 1)) { - for (i = 0; i < 1; i++) - DRM_INFO("rec.address :0x%llx, rec.retired_page :%llu", - recs[i].address, recs[i].retired_page); - } else - DRM_ERROR("Failed in reading from table"); + /* Warn if we are at 90% of the threshold or above + */ + if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold) + dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", + control->ras_num_recs, + ras->bad_page_cnt_threshold); + } else if (hdr->header == RAS_TABLE_HDR_BAD && + amdgpu_bad_page_threshold != 0) { + res = __verify_ras_table_checksum(control); + if (res) + DRM_ERROR("RAS Table incorrect checksum or error:%d\n", + res); + if (ras->bad_page_cnt_threshold > control->ras_num_recs) { + /* This means that, the threshold was increased since + * the last time the system was booted, and now, + * ras->bad_page_cnt_threshold - control->num_recs > 0, + * so that at least one more record can be saved, + * before the page count threshold is reached. 
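The header-tag handling in amdgpu_ras_eeprom_init() reduces to a small decision tree over the tag, the saved record count and the threshold settings. A condensed, illustrative model of that logic (not the driver's actual API):

#include <stdio.h>

enum action { USE_TABLE, UNFLAG_TABLE, FORCE_BOOT, REFUSE_BOOT, NEW_TABLE };

/* @hdr_bad: table was tagged RAS_TABLE_HDR_BAD on a previous boot.
 * @num_recs: records currently saved; @threshold: bad_page_cnt_threshold.
 * @module_param: amdgpu_bad_page_threshold (0 disables, -2 forces boot).
 */
static enum action decide(int hdr_valid, int hdr_bad, unsigned int num_recs,
			  unsigned int threshold, int module_param)
{
	if (hdr_valid)
		return USE_TABLE; /* plus a warning at 90% of the threshold */
	if (hdr_bad && module_param != 0) {
		if (threshold > num_recs)
			return UNFLAG_TABLE; /* threshold was raised since last boot */
		return module_param == -2 ? FORCE_BOOT : REFUSE_BOOT;
	}
	return NEW_TABLE;
}

int main(void)
{
	printf("%d\n", decide(0, 1, 10, 8, -2)); /* FORCE_BOOT */
	return 0;
}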
+ */ + dev_info(adev->dev, + "records:%d threshold:%d, resetting " + "RAS table header signature", + control->ras_num_recs, + ras->bad_page_cnt_threshold); + res = amdgpu_ras_eeprom_correct_header_tag(control, + RAS_TABLE_HDR_VAL); + } else { + dev_err(adev->dev, "RAS records:%d exceed threshold:%d", + control->ras_num_recs, ras->bad_page_cnt_threshold); + if (amdgpu_bad_page_threshold == -2) { + dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2."); + res = 0; + } else { + *exceed_err_limit = true; + dev_err(adev->dev, + "RAS records:%d exceed threshold:%d, " + "GPU will not be initialized. Replace this GPU or increase the threshold", + control->ras_num_recs, ras->bad_page_cnt_threshold); + } + } + } else { + DRM_INFO("Creating a new EEPROM table"); + + res = amdgpu_ras_eeprom_reset_table(control); + } - } else - DRM_ERROR("Failed in writing to table"); + return res < 0 ? res : 0; } -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ca78f812d436..6bb00578bfbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -28,10 +28,11 @@ struct amdgpu_device; -enum amdgpu_ras_eeprom_err_type{ - AMDGPU_RAS_EEPROM_ERR_PLACE_HOLDER, +enum amdgpu_ras_eeprom_err_type { + AMDGPU_RAS_EEPROM_ERR_NA, AMDGPU_RAS_EEPROM_ERR_RECOVERABLE, - AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE + AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE, + AMDGPU_RAS_EEPROM_ERR_COUNT, }; struct amdgpu_ras_eeprom_table_header { @@ -40,17 +41,45 @@ struct amdgpu_ras_eeprom_table_header { uint32_t first_rec_offset; uint32_t tbl_size; uint32_t checksum; -}__attribute__((__packed__)); +} __packed; struct amdgpu_ras_eeprom_control { struct amdgpu_ras_eeprom_table_header tbl_hdr; - struct i2c_adapter eeprom_accessor; - uint32_t next_addr; - unsigned int num_recs; - struct mutex tbl_mutex; - bool bus_locked; - uint32_t tbl_byte_sum; - uint16_t i2c_address; // 8-bit represented address + + /* Base I2C EEPPROM 19-bit memory address, + * where the table is located. For more information, + * see top of amdgpu_eeprom.c. + */ + u32 i2c_address; + + /* The byte offset off of @i2c_address + * where the table header is found, + * and where the records start--always + * right after the header. + */ + u32 ras_header_offset; + u32 ras_record_offset; + + /* Number of records in the table. + */ + u32 ras_num_recs; + + /* First record index to read, 0-based. + * Range is [0, num_recs-1]. This is + * an absolute index, starting right after + * the table header. + */ + u32 ras_fri; + + /* Maximum possible number of records + * we could store, i.e. the maximum capacity + * of the table. + */ + u32 ras_max_record_count; + + /* Protect table access via this mutex. 
+ */ + struct mutex ras_tbl_mutex; }; /* @@ -76,17 +105,26 @@ struct eeprom_table_record { unsigned char mem_channel; unsigned char mcumc_id; -}__attribute__((__packed__)); +} __packed; + +int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, + bool *exceed_err_limit); -int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); -void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); -int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, - bool write, - int num); +bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev); + +int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, const u32 num); + +int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, const u32 num); + +uint32_t amdgpu_ras_eeprom_max_record_count(void); + +void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); -void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control); +extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; +extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; #endif // _AMDGPU_RAS_EEPROM_H diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h new file mode 100644 index 000000000000..acfa207cf970 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef __AMDGPU_RES_CURSOR_H__ +#define __AMDGPU_RES_CURSOR_H__ + +#include <drm/drm_mm.h> +#include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_range_manager.h> + +/* state back for walking over vram_mgr and gtt_mgr allocations */ +struct amdgpu_res_cursor { + uint64_t start; + uint64_t size; + uint64_t remaining; + struct drm_mm_node *node; +}; + +/** + * amdgpu_res_first - initialize a amdgpu_res_cursor + * + * @res: TTM resource object to walk + * @start: Start of the range + * @size: Size of the range + * @cur: cursor object to initialize + * + * Start walking over the range of allocations between @start and @size. 
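A userspace analogue of the amdgpu_res_first()/amdgpu_res_next() pair from this header shows the walk in a runnable form, using byte sizes directly instead of the kernel's page-shifted drm_mm_node fields:

#include <stdint.h>
#include <stdio.h>

struct node { uint64_t start, size; };

struct cursor {
	const struct node *node;
	uint64_t start, size, remaining;
};

/* Skip whole nodes until @start falls inside one, then position the
 * cursor on the first contiguous chunk of the requested range.
 */
static void cur_first(const struct node *nodes, uint64_t start,
		      uint64_t size, struct cursor *cur)
{
	while (start >= nodes->size)
		start -= nodes++->size;

	cur->node = nodes;
	cur->start = nodes->start + start;
	cur->size = nodes->size - start < size ? nodes->size - start : size;
	cur->remaining = size;
}

/* Advance @size bytes, stepping to the next node when the current
 * chunk is exhausted.
 */
static void cur_next(struct cursor *cur, uint64_t size)
{
	cur->remaining -= size;
	if (!cur->remaining)
		return;

	cur->size -= size;
	if (cur->size) {
		cur->start += size;
		return;
	}

	cur->node++;
	cur->start = cur->node->start;
	cur->size = cur->node->size < cur->remaining ?
		    cur->node->size : cur->remaining;
}

int main(void)
{
	struct node nodes[] = { { 0x1000, 8 }, { 0x8000, 16 } };
	struct cursor cur;

	for (cur_first(nodes, 4, 12, &cur); cur.remaining;
	     cur_next(&cur, cur.size))
		printf("chunk at 0x%llx, %llu bytes\n",
		       (unsigned long long)cur.start,
		       (unsigned long long)cur.size);
	return 0;
}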
+ */
+static inline void amdgpu_res_first(struct ttm_resource *res,
+ uint64_t start, uint64_t size,
+ struct amdgpu_res_cursor *cur)
+{
+ struct drm_mm_node *node;
+
+ if (!res || res->mem_type == TTM_PL_SYSTEM) {
+ cur->start = start;
+ cur->size = size;
+ cur->remaining = size;
+ cur->node = NULL;
+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+ return;
+ }
+
+ BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+}
+
+/**
+ * amdgpu_res_next - advance the cursor
+ *
+ * @cur: the cursor to advance
+ * @size: number of bytes to move forward
+ *
+ * Move the cursor @size bytes forward, walking to the next node if necessary.
+ */
+static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
+{
+ struct drm_mm_node *node = cur->node;
+
+ BUG_ON(size > cur->remaining);
+
+ cur->remaining -= size;
+ if (!cur->remaining)
+ return;
+
+ cur->size -= size;
+ if (cur->size) {
+ cur->start += size;
+ return;
+ }
+
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644
index 000000000000..02afd4115675
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_handler *handler)
+{
+ /* TODO: Check if handler exists? 
*/ + list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers); + return 0; +} + +int amdgpu_reset_init(struct amdgpu_device *adev) +{ + int ret = 0; + + switch (adev->asic_type) { + case CHIP_ALDEBARAN: + ret = aldebaran_reset_init(adev); + break; + default: + break; + } + + return ret; +} + +int amdgpu_reset_fini(struct amdgpu_device *adev) +{ + int ret = 0; + + switch (adev->asic_type) { + case CHIP_ALDEBARAN: + ret = aldebaran_reset_fini(adev); + break; + default: + break; + } + + return ret; +} + +int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_reset_handler *reset_handler = NULL; + + if (adev->reset_cntl && adev->reset_cntl->get_reset_handler) + reset_handler = adev->reset_cntl->get_reset_handler( + adev->reset_cntl, reset_context); + if (!reset_handler) + return -ENOSYS; + + return reset_handler->prepare_hwcontext(adev->reset_cntl, + reset_context); +} + +int amdgpu_reset_perform_reset(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context) +{ + int ret; + struct amdgpu_reset_handler *reset_handler = NULL; + + if (adev->reset_cntl) + reset_handler = adev->reset_cntl->get_reset_handler( + adev->reset_cntl, reset_context); + if (!reset_handler) + return -ENOSYS; + + ret = reset_handler->perform_reset(adev->reset_cntl, reset_context); + if (ret) + return ret; + + return reset_handler->restore_hwcontext(adev->reset_cntl, + reset_context); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h new file mode 100644 index 000000000000..e00d38d9160a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -0,0 +1,85 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
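amdgpu_reset_perform_reset() above is a lookup-and-run over registered handlers: find one matching the requested method, run its perform stage, then its restore stage. The same shape in a standalone sketch (illustrative types; the driver additionally indirects through get_reset_handler() and returns -ENOSYS when nothing matches):

#include <stddef.h>
#include <stdio.h>

struct ctx { int method; };

struct handler {
	int method;
	int (*perform)(struct ctx *);
	int (*restore)(struct ctx *);
};

/* Find the handler for ctx->method and run its stages in order. */
static int perform_reset(struct handler *h, size_t n, struct ctx *ctx)
{
	size_t i;

	for (i = 0; i < n; i++) {
		int ret;

		if (h[i].method != ctx->method)
			continue;
		ret = h[i].perform(ctx);
		if (ret)
			return ret;
		return h[i].restore(ctx);
	}
	return -38; /* -ENOSYS, as in the driver */
}

static int ok(struct ctx *c) { (void)c; return 0; }

int main(void)
{
	struct handler h[] = { { 1, ok, ok } };
	struct ctx c = { 1 };

	printf("reset: %d\n", perform_reset(h, 1, &c));
	return 0;
}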
+ * + */ + +#ifndef __AMDGPU_RESET_H__ +#define __AMDGPU_RESET_H__ + +#include "amdgpu.h" + +enum AMDGPU_RESET_FLAGS { + + AMDGPU_NEED_FULL_RESET = 0, + AMDGPU_SKIP_HW_RESET = 1, +}; + +struct amdgpu_reset_context { + enum amd_reset_method method; + struct amdgpu_device *reset_req_dev; + struct amdgpu_job *job; + struct amdgpu_hive_info *hive; + unsigned long flags; +}; + +struct amdgpu_reset_handler { + enum amd_reset_method reset_method; + struct list_head handler_list; + int (*prepare_env)(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + int (*perform_reset)(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + int (*restore_env)(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + + int (*do_reset)(struct amdgpu_device *adev); +}; + +struct amdgpu_reset_control { + void *handle; + struct work_struct reset_work; + struct mutex reset_lock; + struct list_head reset_handlers; + atomic_t in_reset; + enum amd_reset_method active_reset; + struct amdgpu_reset_handler *(*get_reset_handler)( + struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_context *context); + void (*async_reset)(struct work_struct *work); +}; + +int amdgpu_reset_init(struct amdgpu_device *adev); +int amdgpu_reset_fini(struct amdgpu_device *adev); + +int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context); + +int amdgpu_reset_perform_reset(struct amdgpu_device *adev, + struct amdgpu_reset_context *reset_context); + +int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl, + struct amdgpu_reset_handler *handler); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index e5c83e164d82..ab2351ba9574 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -48,14 +48,10 @@ * wptr. The GPU then starts fetching commands and executes * them until the pointers are equal again. 
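The rptr/wptr handshake described in the comment above is the classic single-producer ring: the driver advances the write pointer as it emits commands, and the GPU fetches until its read pointer catches up. A toy model:

#include <stdint.h>
#include <stdio.h>

#define RING_DW 16 /* ring size in dwords, power of two */

static uint32_t ring[RING_DW];
static unsigned int rptr, wptr; /* free-running, wrapped with a mask */

static void ring_write(uint32_t v) /* driver side */
{
	ring[wptr++ & (RING_DW - 1)] = v;
}

static int ring_fetch(uint32_t *v) /* "GPU" side */
{
	if (rptr == wptr)
		return 0; /* pointers equal: nothing left to execute */
	*v = ring[rptr++ & (RING_DW - 1)];
	return 1;
}

int main(void)
{
	uint32_t v;

	ring_write(0xdead);
	ring_write(0xbeef);
	while (ring_fetch(&v))
		printf("fetched 0x%x\n", v);
	return 0;
}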
*/ -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring); -static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer * - * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information * @ndw: number of dwords to allocate in the ring buffer * @@ -98,7 +94,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) amdgpu_ring_write(ring, ring->funcs->nop); } -/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets +/** + * amdgpu_ring_generic_pad_ib - pad IB with NOP packets * * @ring: amdgpu_ring structure holding ring information * @ib: IB to add NOP packets to @@ -115,7 +112,6 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) * amdgpu_ring_commit - tell the GPU to execute the new * commands on the ring buffer * - * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information * * Update the wptr (write pointer) to tell the GPU to @@ -154,92 +150,28 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } /** - * amdgpu_ring_priority_put - restore a ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Release a request for executing at @priority - */ -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - int i; - - if (!ring->funcs->set_priority) - return; - - if (atomic_dec_return(&ring->num_jobs[priority]) > 0) - return; - - /* no need to restore if the job is already at the lowest priority */ - if (priority == DRM_SCHED_PRIORITY_NORMAL) - return; - - mutex_lock(&ring->priority_mutex); - /* something higher prio is executing, no need to decay */ - if (ring->priority > priority) - goto out_unlock; - - /* decay priority to the next level with a job available */ - for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { - if (i == DRM_SCHED_PRIORITY_NORMAL - || atomic_read(&ring->num_jobs[i])) { - ring->priority = i; - ring->funcs->set_priority(ring, i); - break; - } - } - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** - * amdgpu_ring_priority_get - change the ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Request a ring's priority to be raised to @priority (refcounted). - */ -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - if (!ring->funcs->set_priority) - return; - - if (atomic_inc_return(&ring->num_jobs[priority]) <= 0) - return; - - mutex_lock(&ring->priority_mutex); - if (priority <= ring->priority) - goto out_unlock; - - ring->priority = priority; - ring->funcs->set_priority(ring, priority); - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** * amdgpu_ring_init - init driver ring struct. * * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information - * @max_ndw: maximum number of dw for ring alloc - * @nop: nop packet for this ring + * @max_dw: maximum number of dw for ring alloc + * @irq_src: interrupt source to use for this ring + * @irq_type: interrupt type to use for this ring + * @hw_prio: ring priority (NORMAL/HIGH) + * @sched_score: optional score atomic shared with other schedulers * * Initialize the driver information for the selected ring (all asics). * Returns 0 on success, error on failure. 
*/ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned max_dw, struct amdgpu_irq_src *irq_src, - unsigned irq_type) + unsigned int max_dw, struct amdgpu_irq_src *irq_src, + unsigned int irq_type, unsigned int hw_prio, + atomic_t *sched_score) { - int r, i; + int r; int sched_hw_submission = amdgpu_sched_hw_submission; + u32 *num_sched; + u32 hw_ip; /* Set the hw submission limit higher for KIQ because * it's used for a number of gfx/compute tasks by both @@ -259,7 +191,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission); + r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission, + sched_score); if (r) return r; } @@ -328,14 +261,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->max_dw = max_dw; - ring->priority = DRM_SCHED_PRIORITY_NORMAL; - mutex_init(&ring->priority_mutex); + ring->hw_prio = hw_prio; - for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) - atomic_set(&ring->num_jobs[i], 0); - - if (amdgpu_debugfs_ring_init(adev, ring)) { - DRM_ERROR("Failed to register debugfs file for rings !\n"); + if (!ring->no_scheduler) { + hw_ip = ring->funcs->type; + num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds; + adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] = + &ring->sched; } return 0; @@ -344,19 +276,19 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /** * amdgpu_ring_fini - tear down the driver ring struct. * - * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information * * Tear down the driver information for the selected ring (all asics). 
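Note the tail of amdgpu_ring_init() above: in place of the removed per-ring priority bookkeeping, each schedulable ring now files its scheduler into a per-IP, per-priority table that context creation picks from later. The registration step in isolation (illustrative types and sizes):

#include <stdio.h>

#define MAX_HWIP 9     /* one slot per hardware IP type */
#define PRIO_LEVELS 3  /* AMDGPU_RING_PRIO_MAX */
#define MAX_HWIP_RINGS 8

struct sched_list {
	unsigned int num_scheds;
	void *sched[MAX_HWIP_RINGS]; /* drm_gpu_scheduler * in the driver */
};

static struct sched_list gpu_sched[MAX_HWIP][PRIO_LEVELS];

/* What the !ring->no_scheduler branch of amdgpu_ring_init() boils down to. */
static void register_ring(unsigned int hw_ip, unsigned int hw_prio,
			  void *sched)
{
	struct sched_list *l = &gpu_sched[hw_ip][hw_prio];

	l->sched[l->num_scheds++] = sched;
}

int main(void)
{
	int dummy;

	register_ring(0, 1, &dummy); /* GFX ring at default priority */
	printf("gfx default-prio scheds: %u\n", gpu_sched[0][1].num_scheds);
	return 0;
}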
*/ void amdgpu_ring_fini(struct amdgpu_ring *ring) { - ring->sched.ready = false; /* Not to finish a ring which is not initialized */ if (!(ring->adev) || !(ring->adev->rings[ring->idx])) return; + ring->sched.ready = false; + amdgpu_device_wb_free(ring->adev, ring->rptr_offs); amdgpu_device_wb_free(ring->adev, ring->wptr_offs); @@ -367,8 +299,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) &ring->gpu_addr, (void **)&ring->ring); - amdgpu_debugfs_ring_fini(ring); - dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; ring->me = 0; @@ -379,7 +309,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) /** * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper * - * @adev: amdgpu_device pointer + * @ring: ring to write to * @reg0: register to write * @reg1: register to wait on * @ref: reference value to write/wait on @@ -465,7 +395,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, return result; value = ring->ring[(*pos - 12)/4]; - r = put_user(value, (uint32_t*)buf); + r = put_user(value, (uint32_t *)buf); if (r) return r; buf += 4; @@ -485,32 +415,19 @@ static const struct file_operations amdgpu_debugfs_ring_fops = { #endif -static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring) +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - struct drm_minor *minor = adev->ddev->primary; - struct dentry *ent, *root = minor->debugfs_root; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; char name[32]; sprintf(name, "amdgpu_ring_%s", ring->name); + debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + &amdgpu_debugfs_ring_fops, + ring->ring_size + 12); - ent = debugfs_create_file(name, - S_IFREG | S_IRUGO, root, - ring, &amdgpu_debugfs_ring_fops); - if (!ent) - return -ENOMEM; - - i_size_write(ent->d_inode, ring->ring_size + 12); - ring->ent = ent; -#endif - return 0; -} - -static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) -{ -#if defined(CONFIG_DEBUG_FS) - debugfs_remove(ring->ent); #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 930316e60155..4d380e79752c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -30,11 +30,20 @@ /* max number of rings */ #define AMDGPU_MAX_RINGS 28 +#define AMDGPU_MAX_HWIP_RINGS 8 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 #define AMDGPU_MAX_UVD_ENC_RINGS 2 +enum amdgpu_ring_priority_level { + AMDGPU_RING_PRIO_0, + AMDGPU_RING_PRIO_1, + AMDGPU_RING_PRIO_DEFAULT = 1, + AMDGPU_RING_PRIO_2, + AMDGPU_RING_PRIO_MAX +}; + /* some special values for the owner field */ #define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul) #define AMDGPU_FENCE_OWNER_VM ((void *)1ul) @@ -44,19 +53,36 @@ #define AMDGPU_FENCE_FLAG_INT (1 << 1) #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) +/* fence flag bit to indicate the face is embedded in job*/ +#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1) + #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched) +#define AMDGPU_IB_POOL_SIZE (1024 * 1024) + enum amdgpu_ring_type { - AMDGPU_RING_TYPE_GFX, - AMDGPU_RING_TYPE_COMPUTE, - AMDGPU_RING_TYPE_SDMA, - AMDGPU_RING_TYPE_UVD, - AMDGPU_RING_TYPE_VCE, + AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX, + AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE, + AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA, + 
AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD, + AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE, + AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC, + AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC, + AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC, + AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG, AMDGPU_RING_TYPE_KIQ, - AMDGPU_RING_TYPE_UVD_ENC, - AMDGPU_RING_TYPE_VCN_DEC, - AMDGPU_RING_TYPE_VCN_ENC, - AMDGPU_RING_TYPE_VCN_JPEG + AMDGPU_RING_TYPE_MES +}; + +enum amdgpu_ib_pool_type { + /* Normal submissions to the top of the pipeline. */ + AMDGPU_IB_POOL_DELAYED, + /* Immediate submissions to the bottom of the pipeline. */ + AMDGPU_IB_POOL_IMMEDIATE, + /* Direct submission to the ring buffer during init and reset. */ + AMDGPU_IB_POOL_DIRECT, + + AMDGPU_IB_POOL_MAX }; struct amdgpu_device; @@ -65,6 +91,11 @@ struct amdgpu_ib; struct amdgpu_cs_parser; struct amdgpu_job; +struct amdgpu_sched { + u32 num_scheds; + struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS]; +}; + /* * Fences. */ @@ -83,20 +114,22 @@ struct amdgpu_fence_driver { struct dma_fence **fences; }; -int amdgpu_fence_driver_init(struct amdgpu_device *adev); -void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, - unsigned num_hw_submission); + unsigned num_hw_submission, + atomic_t *sched_score); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); -void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); -void amdgpu_fence_driver_resume(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, +void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); +void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); +int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); +void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job, unsigned flags); -int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s); +int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, + uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, @@ -159,20 +192,21 @@ struct amdgpu_ring_funcs { void (*end_use)(struct amdgpu_ring *ring); void (*emit_switch_buffer) (struct amdgpu_ring *ring); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); - void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); + void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask); void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); - void (*emit_tmz)(struct amdgpu_ring *ring, bool start); - /* priority functions */ - void (*set_priority) (struct amdgpu_ring *ring, - enum drm_sched_priority priority); + void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start, + bool secure); /* Try to soft recover the ring to make the fence signal */ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); + void (*emit_mem_sync)(struct amdgpu_ring *ring); + void 
(*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); }; struct amdgpu_ring { @@ -217,15 +251,8 @@ struct amdgpu_ring { unsigned vm_inv_eng; struct dma_fence *vmid_wait; bool has_compute_vm_bug; - - atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; - struct mutex priority_mutex; - /* protected by priority_mutex */ - int priority; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *ent; -#endif + bool no_scheduler; + int hw_prio; }; #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) @@ -243,11 +270,11 @@ struct amdgpu_ring { #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) -#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) +#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o)) #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) -#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) +#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s)) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) @@ -258,13 +285,10 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority); -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned ring_size, struct amdgpu_irq_src *irq_src, - unsigned irq_type); + unsigned int ring_size, struct amdgpu_irq_src *irq_src, + unsigned int irq_type, unsigned int prio, + atomic_t *sched_score); void amdgpu_ring_fini(struct amdgpu_ring *ring); void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, uint32_t reg0, uint32_t val0, @@ -328,4 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, int amdgpu_ring_test_helper(struct amdgpu_ring *ring); +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index d3d4707f2168..00afd0dcae86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -126,6 +126,10 @@ struct amdgpu_rlc_funcs { void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); + void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); + void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip); + u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip); + bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; struct amdgpu_rlc { @@ -165,12 +169,16 @@ struct amdgpu_rlc { u32 save_restore_list_cntl_size_bytes; u32 
save_restore_list_gpm_size_bytes; u32 save_restore_list_srm_size_bytes; + u32 rlc_iram_ucode_size_bytes; + u32 rlc_dram_ucode_size_bytes; u32 *register_list_format; u32 *register_restore; u8 *save_restore_list_cntl; u8 *save_restore_list_gpm; u8 *save_restore_list_srm; + u8 *rlc_iram_ucode; + u8 *rlc_dram_ucode; bool is_rlc_v2_1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 0bd1d4ffc19e..524d10b21041 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -75,7 +75,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, } void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, - struct amdgpu_sa_manager *sa_manager) + struct amdgpu_sa_manager *sa_manager) { struct amdgpu_sa_bo *sa_bo, *tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index c799691dfa84..e9b45089a28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -29,32 +29,12 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" - +#include "amdgpu_sched.h" #include "amdgpu_vm.h" -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) -{ - switch (amdgpu_priority) { - case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return DRM_SCHED_PRIORITY_HIGH_HW; - case AMDGPU_CTX_PRIORITY_HIGH: - return DRM_SCHED_PRIORITY_HIGH_SW; - case AMDGPU_CTX_PRIORITY_NORMAL: - return DRM_SCHED_PRIORITY_NORMAL; - case AMDGPU_CTX_PRIORITY_LOW: - case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_LOW; - case AMDGPU_CTX_PRIORITY_UNSET: - return DRM_SCHED_PRIORITY_UNSET; - default: - WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return DRM_SCHED_PRIORITY_INVALID; - } -} - static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -81,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, int fd, unsigned ctx_id, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -115,28 +95,40 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_sched *args = data; - struct amdgpu_device *adev = dev->dev_private; - enum drm_sched_priority priority; + struct amdgpu_device *adev = drm_to_adev(dev); int r; - priority = amdgpu_to_sched_priority(args->in.priority); - if (priority == DRM_SCHED_PRIORITY_INVALID) + /* First check the op, then the op's argument. + */ + switch (args->in.op) { + case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: + case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: + break; + default: + DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); + return -EINVAL; + } + + if (!amdgpu_ctx_priority_is_valid(args->in.priority)) { + WARN(1, "Invalid context priority %d\n", args->in.priority); return -EINVAL; + } switch (args->in.op) { case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: r = amdgpu_sched_process_priority_override(adev, args->in.fd, - priority); + args->in.priority); break; case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: r = amdgpu_sched_context_priority_override(adev, args->in.fd, args->in.ctx_id, - priority); + args->in.priority); break; default: - DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); + /* Impossible. 
+ */ r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h index 12299fd95691..67e5b2472f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h @@ -30,7 +30,8 @@ enum drm_sched_priority; struct drm_device; struct drm_file; -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); +int amdgpu_to_sched_priority(int amdgpu_priority, + enum drm_sched_priority *prio); int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 6010999d9020..65debb65a5df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t index = 0; int r; - if (vmid == 0 || !amdgpu_mcbp) + /* don't enable OS preemption on SDMA under SRIOV */ + if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp) return 0; r = amdgpu_sdma_get_index_from_ring(ring, &index); @@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; struct ras_fs_if fs_info = { .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", }; if (!ih_info) @@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA; adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->sdma.ras_if->sub_block_index = 0; - strcpy(adev->sdma.ras_if->name, "sdma"); } fs_info.head = ih_info->head = *adev->sdma.ras_if; @@ -126,7 +125,7 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, goto free; } - return 0; + return 0; late_fini: amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info); @@ -160,7 +159,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); return AMDGPU_RAS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 761ff8be6314..f8fb755e3aa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -50,15 +50,30 @@ struct amdgpu_sdma_instance { bool burst_nop; }; +struct amdgpu_sdma_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev, + void *ras_ih_info); + void (*ras_fini)(struct amdgpu_device *adev); + int (*query_ras_error_count)(struct amdgpu_device *adev, + uint32_t instance, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); +}; + struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; struct amdgpu_irq_src ecc_irq; + struct amdgpu_irq_src vm_hole_irq; + struct amdgpu_irq_src doorbell_invalid_irq; + struct amdgpu_irq_src pool_timeout_irq; + struct amdgpu_irq_src srbm_write_irq; + int num_instances; uint32_t srbm_soft_reset; bool has_page_queue; struct ras_common_if *ras_if; + const struct amdgpu_sdma_ras_funcs *funcs; }; /* @@ -79,7 +94,8 @@ struct amdgpu_buffer_funcs { /* dst addr in bytes */ uint64_t dst_offset, /* number of byte to transfer */ - uint32_t byte_count); + uint32_t byte_count, + bool tmz); /* maximum bytes in a single operation */ uint32_t fill_max_bytes; @@ -97,7 +113,7 @@ struct 
amdgpu_buffer_funcs { uint32_t byte_count); }; -#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) +#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) struct amdgpu_sdma_instance * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c new file mode 100644 index 000000000000..cc7597a15fe9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -0,0 +1,179 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> + +#include "amdgpu.h" +#include "amdgpu_securedisplay.h" + +/** + * DOC: AMDGPU SECUREDISPLAY debugfs test interface + * + * How to use: + * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test + * e.g. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test + * e.g. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test + * + * opcode: + * 1: Query whether the TA is responding; used only for validation purposes. + * 2: Send Region of Interest and CRC value to I2C. (uint32)phy_id is + * sent to determine which DIO scratch register should be used to get + * the ROI and receive i2c_buf as the output. + * + * See the header file ta_securedisplay_if.h for more detail. + * + */ +
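Before the full listing, it may help to see the shape of this interface: every operation is a prepare/invoke/check cycle against the secure display trusted application (TA). The fragment below is a condensed sketch of what the debugfs handler further down does for opcode 1, not an additional API; error handling is stripped and psp stands for &adev->psp:

	struct securedisplay_cmd *cmd;
	int ret;

	/* Reset the shared command buffer and select the command to run. */
	psp_prep_securedisplay_cmd_buf(psp, &cmd, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	/* Hand the command to the TA and wait for its response. */
	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
	if (!ret && cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
		psp_securedisplay_parse_resp_status(psp, cmd->status);

The same cycle with TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC, plus a phy_id filled into the input message, implements opcode 2.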
+void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status) +{ + switch (status) { + case TA_SECUREDISPLAY_STATUS__SUCCESS: + break; + case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE: + dev_err(psp->adev->dev, "Secure display: Generic Failure."); + break; + case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER: + dev_err(psp->adev->dev, "Secure display: Invalid Parameter."); + break; + case TA_SECUREDISPLAY_STATUS__NULL_POINTER: + dev_err(psp->adev->dev, "Secure display: Null Pointer."); + break; + case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to write to I2C."); + break; + case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register."); + break; + case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to Read CRC."); + break; + case TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to initialize I2C."); + break; + default: + dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status); + } +} + +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id) +{ + *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf; + memset(*cmd, 0, sizeof(struct securedisplay_cmd)); + (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE; + (*cmd)->cmd_id = command_id; +} + +#if defined(CONFIG_DEBUG_FS) + +static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct psp_context *psp = &adev->psp; + struct securedisplay_cmd *securedisplay_cmd; + struct drm_device *dev = adev_to_drm(adev); + uint32_t phy_id; + uint32_t op; + char str[64]; + int ret; + + if (*pos || size > sizeof(str) - 1) + return -EINVAL; + + memset(str, 0, sizeof(str)); + ret = copy_from_user(str, buf, size); + if (ret) + return -EFAULT; + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); + return ret; + } + + if (size < 3) + sscanf(str, "%u ", &op); + else + sscanf(str, "%u %u", &op, &phy_id); + + switch (op) { + case 1: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__QUERY_TA); + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) + dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n", + securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); + else + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + break; + case 2: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id; + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) { + dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is: %*ph\n", + TA_SECUREDISPLAY_I2C_BUFFER_SIZE, + securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf); + } else { + psp_securedisplay_parse_resp_status(psp, 
securedisplay_cmd->status); + } + } + break; + default: + dev_err(adev->dev, "Invalid input: %s\n", str); + } + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return size; +} + +static const struct file_operations amdgpu_securedisplay_debugfs_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_securedisplay_debugfs_write, + .llseek = default_llseek +}; + +#endif + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + if (!adev->psp.securedisplay_context.context.initialized) + return; + + debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root, + adev, &amdgpu_securedisplay_debugfs_ops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h new file mode 100644 index 000000000000..fe98574748f4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#ifndef _AMDGPU_SECUREDISPLAY_H +#define _AMDGPU_SECUREDISPLAY_H + +#include "amdgpu.h" +#include "ta_secureDisplay_if.h" + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev); +void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status); +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h new file mode 100644 index 000000000000..484bb3dcec47 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -0,0 +1,40 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __AMDGPU_SMUIO_H__ +#define __AMDGPU_SMUIO_H__ + +struct amdgpu_smuio_funcs { + u32 (*get_rom_index_offset)(struct amdgpu_device *adev); + u32 (*get_rom_data_offset)(struct amdgpu_device *adev); + void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable); + void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags); + u32 (*get_die_id)(struct amdgpu_device *adev); + u32 (*get_socket_id)(struct amdgpu_device *adev); + bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); +}; + +struct amdgpu_smuio { + const struct amdgpu_smuio_funcs *funcs; +}; + +#endif /* __AMDGPU_SMUIO_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 95e5e93edd18..862eb3c1c4c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -28,6 +28,8 @@ * Christian König <christian.koenig@amd.com> */ +#include <linux/dma-fence-chain.h> + #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" @@ -35,7 +37,6 @@ struct amdgpu_sync_entry { struct hlist_node node; struct dma_fence *fence; - bool explicit; }; static struct kmem_cache *amdgpu_sync_slab; @@ -79,7 +80,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, /** * amdgpu_sync_get_owner - extract the owner of a fence * - * @fence: fence get the owner from + * @f: fence to get the owner from * * Extract who originally created the fence. */ @@ -129,7 +130,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep, * Tries to add the fence to an existing hash entry. Returns true when an entry * was found, false otherwise. */ -static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit) +static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) { struct amdgpu_sync_entry *e; @@ -138,10 +139,6 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, continue; amdgpu_sync_keep_later(&e->fence, f); - - /* Preserve eplicit flag to not loose pipe line sync */ - e->explicit |= explicit; - return true; } return false; @@ -151,51 +148,112 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to - * @fence: fence to sync to + * @f: fence to sync to * + * Add the fence to the sync object. 
*/ -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit) +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f) { struct amdgpu_sync_entry *e; if (!f) return 0; - if (amdgpu_sync_same_dev(adev, f) && - amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM) - amdgpu_sync_keep_later(&sync->last_vm_update, f); - if (amdgpu_sync_add_later(sync, f, explicit)) + if (amdgpu_sync_add_later(sync, f)) return 0; e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL); if (!e) return -ENOMEM; - e->explicit = explicit; - hash_add(sync->fences, &e->node, f->context); e->fence = dma_fence_get(f); return 0; } /** + * amdgpu_sync_vm_fence - remember to sync to this VM fence + * + * @sync: sync object to add fence to + * @fence: the VM fence to add + * + * Add the fence to the sync object and remember it as VM update. + */ +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) +{ + if (!fence) + return 0; + + amdgpu_sync_keep_later(&sync->last_vm_update, fence); + return amdgpu_sync_fence(sync, fence); +} + +/* Determine based on the owner and mode if we should sync to a fence or not */ +static bool amdgpu_sync_test_fence(struct amdgpu_device *adev, + enum amdgpu_sync_mode mode, + void *owner, struct dma_fence *f) +{ + void *fence_owner = amdgpu_sync_get_owner(f); + + /* Always sync to moves, no matter what */ + if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) + return true; + + /* We only want to trigger KFD eviction fences on + * evict or move jobs. Skip KFD fences otherwise. + */ + if (fence_owner == AMDGPU_FENCE_OWNER_KFD && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Never sync to VM updates either. */ + if (fence_owner == AMDGPU_FENCE_OWNER_VM && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Ignore fences depending on the sync mode */ + switch (mode) { + case AMDGPU_SYNC_ALWAYS: + return true; + + case AMDGPU_SYNC_NE_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner == owner) + return false; + break; + + case AMDGPU_SYNC_EQ_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner != owner) + return false; + break; + + case AMDGPU_SYNC_EXPLICIT: + return false; + } + + WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, + "Adding eviction fence to sync obj"); + return true; +} + 
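With the owner checks folded into amdgpu_sync_test_fence(), callers now pick a sync mode instead of threading an explicit flag through every amdgpu_sync_fence() call. A minimal sketch of the intended submission-side usage follows; the amdgpu_sync_* functions and the AMDGPU_SYNC_NE_OWNER semantics are the ones defined in this file, while the helper name, resv, and owner are illustrative placeholders:

	/* Collect and wait for the fences a new job must respect. */
	static int example_sync_before_submit(struct amdgpu_device *adev,
					      struct dma_resv *resv, void *owner)
	{
		struct amdgpu_sync sync;
		int r;

		amdgpu_sync_create(&sync);

		/* NE_OWNER: sync to everyone else's fences, but skip our
		 * own so back-to-back submissions can pipeline.
		 */
		r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
		if (!r)
			r = amdgpu_sync_wait(&sync, false);

		amdgpu_sync_free(&sync);
		return r;
	}

AMDGPU_SYNC_EQ_OWNER inverts the filter (only fences from the same owner on the same device are waited on), and AMDGPU_SYNC_EXPLICIT skips all implicit fences except moves.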
+/** * amdgpu_sync_resv - sync to a reservation object * + * @adev: amdgpu device * @sync: sync object to add fences from reservation object to * @resv: reservation object with embedded fence - * @explicit_sync: true if we should only sync to the exclusive fence + * @mode: how owner affects which fences we sync to + * @owner: owner of the planned job submission * * Sync to the fence */ -int amdgpu_sync_resv(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct dma_resv *resv, - void *owner, bool explicit_sync) +int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct dma_resv *resv, enum amdgpu_sync_mode mode, + void *owner) { struct dma_resv_list *flist; struct dma_fence *f; - void *fence_owner; unsigned i; int r = 0; @@ -203,47 +261,35 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, return -EINVAL; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); - r = amdgpu_sync_fence(adev, sync, f, false); + f = dma_resv_excl_fence(resv); + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); - flist = dma_resv_get_list(resv); - if (!flist || r) - return r; + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? + chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } + } + + flist = dma_resv_shared_list(resv); + if (!flist) + return 0; for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], dma_resv_held(resv)); - /* We only want to trigger KFD eviction fences on - * evict or move jobs. Skip KFD fences otherwise. - */ - fence_owner = amdgpu_sync_get_owner(f); - if (fence_owner == AMDGPU_FENCE_OWNER_KFD && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) - continue; - if (amdgpu_sync_same_dev(adev, f)) { - /* VM updates are only interesting - * for other VM updates and moves. - */ - if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - ((owner == AMDGPU_FENCE_OWNER_VM) != - (fence_owner == AMDGPU_FENCE_OWNER_VM))) - continue; - - /* Ignore fence from the same owner and explicit one as - * long as it isn't undefined. - */ - if (owner != AMDGPU_FENCE_OWNER_UNDEFINED && - (fence_owner == owner || explicit_sync)) - continue; + if (amdgpu_sync_test_fence(adev, mode, owner, f)) { + r = amdgpu_sync_fence(sync, f); + if (r) + return r; } - - r = amdgpu_sync_fence(adev, sync, f, false); - if (r) - break; } - return r; + return 0; } /** @@ -294,11 +340,10 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, * amdgpu_sync_get_fence - get the next fence from the sync object * * @sync: sync object to use - * @explicit: true if the next fence is explicit * * Get and removes the next fence from the sync object not signaled yet. */ -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit) +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -307,8 +352,6 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit hash_for_each_safe(sync->fences, i, tmp, e, node) { f = e->fence; - if (explicit) - *explicit = e->explicit; hash_del(&e->node); kmem_cache_free(amdgpu_sync_slab, e); @@ -340,7 +383,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone) hash_for_each_safe(source->fences, i, tmp, e, node) { f = e->fence; if (!dma_fence_is_signaled(f)) { - r = amdgpu_sync_fence(NULL, clone, f, e->explicit); + r = amdgpu_sync_fence(clone, f); if (r) return r; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index b5f1778a2319..7c0fe20c470d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -31,6 +31,13 @@ struct dma_resv; struct amdgpu_device; struct amdgpu_ring; +enum amdgpu_sync_mode { + AMDGPU_SYNC_ALWAYS, + AMDGPU_SYNC_NE_OWNER, + AMDGPU_SYNC_EQ_OWNER, + AMDGPU_SYNC_EXPLICIT +}; + /* * Container for fences used to sync command submissions. 
*/ @@ -40,16 +47,14 @@ struct amdgpu_sync { }; void amdgpu_sync_create(struct amdgpu_sync *sync); -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit); -int amdgpu_sync_resv(struct amdgpu_device *adev, - struct amdgpu_sync *sync, - struct dma_resv *resv, - void *owner, - bool explicit_sync); +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f); +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence); +int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, + struct dma_resv *resv, enum amdgpu_sync_mode mode, + void *owner); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit); +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); void amdgpu_sync_free(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b158230af8db..909d830b513e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -42,16 +42,11 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) size = 1024 * 1024; /* Number of tests = - * (Total GTT - IB pool - writeback page - ring buffers) / test size + * (Total GTT - gart_pin_size - (2 transfer windows for buffer moves)) / test size */ - n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) - if (adev->rings[i]) - n -= adev->rings[i]->ring_size; - if (adev->wb.wb_obj) - n -= AMDGPU_GPU_PAGE_SIZE; - if (adev->irq.ih.ring_obj) - n -= adev->irq.ih.ring_size; + n = adev->gmc.gart_size - atomic64_read(&adev->gart_pin_size); + n -= AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS * + AMDGPU_GPU_PAGE_SIZE; n /= size; gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL); @@ -67,6 +62,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) bp.flags = 0; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); r = amdgpu_bo_create(adev, &bp, &vram_obj); if (r) { @@ -124,7 +120,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gart_addr, vram_addr, - size, NULL, &fence, false, false); + size, NULL, &fence, false, false, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -157,10 +153,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) i, *vram_start, gart_start, (unsigned long long) (gart_addr - adev->gmc.gart_start + - (void*)gart_start - gtt_map), + (void *)gart_start - gtt_map), (unsigned long long) (vram_addr - adev->gmc.vram_start + - (void*)gart_start - gtt_map)); + (void *)gart_start - gtt_map)); amdgpu_bo_kunmap(vram_obj); goto out_lclean_unpin; } @@ -170,7 +166,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gart_addr, - size, NULL, &fence, false, false); + size, NULL, &fence, false, false, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); @@ -203,10 +199,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) i, *gart_start, vram_start, (unsigned long long) (vram_addr - adev->gmc.vram_start + - (void*)vram_start - vram_map), + (void *)vram_start - vram_map), 
(unsigned long long) (gart_addr - adev->gmc.gart_start + - (void*)vram_start - vram_map)); + (void *)vram_start - vram_map)); amdgpu_bo_kunmap(gtt_obj[i]); goto out_lclean_unpin; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 63e734a125fb..d855cb53c7e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _AMDGPU_TRACE_H_ #include <linux/stringify.h> @@ -35,7 +35,7 @@ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) -TRACE_EVENT(amdgpu_mm_rreg, +TRACE_EVENT(amdgpu_device_rreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), TP_STRUCT__entry( @@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg, (unsigned long)__entry->value) ); -TRACE_EVENT(amdgpu_mm_wreg, +TRACE_EVENT(amdgpu_device_wreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), TP_STRUCT__entry( @@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.num_pages; - __entry->type = bo->tbo.mem.mem_type; + __entry->pages = bo->tbo.resource->num_pages; + __entry->type = bo->tbo.resource->mem_type; __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; __entry->visible = bo->flags; @@ -176,10 +176,10 @@ TRACE_EVENT(amdgpu_cs_ioctl, TP_fast_assign( __entry->sched_job_id = job->base.id; - __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) + __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name); __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", @@ -201,10 +201,10 @@ TRACE_EVENT(amdgpu_sched_run_job, TP_fast_assign( __entry->sched_job_id = job->base.id; - __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) + __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)); __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name); __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", @@ -229,7 +229,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_fast_assign( __entry->pasid = vm->pasid; - __assign_str(ring, ring->name) + __assign_str(ring, ring->name); __entry->vmid = job->vmid; __entry->vm_hub = ring->funcs->vmhub, __entry->pd_addr = job->vm_pd_addr; @@ -321,17 +321,61 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs, TP_ARGS(mapping) ); +TRACE_EVENT(amdgpu_vm_update_ptes, + TP_PROTO(struct amdgpu_vm_update_params *p, + uint64_t start, uint64_t end, + unsigned int nptes, uint64_t dst, + uint64_t incr, uint64_t flags, + pid_t pid, uint64_t vm_ctx), + TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx), + TP_STRUCT__entry( + __field(u64, start) + __field(u64, end) + __field(u64, flags) + __field(unsigned int, nptes) + __field(u64, incr) + __field(pid_t, pid) + __field(u64, vm_ctx) + __dynamic_array(u64, dst, nptes) + ), 
+ + TP_fast_assign( + unsigned int i; + + __entry->start = start; + __entry->end = end; + __entry->flags = flags; + __entry->incr = incr; + __entry->nptes = nptes; + __entry->pid = pid; + __entry->vm_ctx = vm_ctx; + for (i = 0; i < nptes; ++i) { + u64 addr = p->pages_addr ? amdgpu_vm_map_gart( + p->pages_addr, dst) : dst; + + ((u64 *)__get_dynamic_array(dst))[i] = addr; + dst += incr; + } + ), + TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx," + " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid, + __entry->vm_ctx, __entry->start, __entry->end, + __entry->flags, __entry->incr, __print_array( + __get_dynamic_array(dst), min(__entry->nptes, 32u), 8), + __entry->nptes > 32 ? "..." : "") +); + TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint64_t flags, bool direct), - TP_ARGS(pe, addr, count, incr, flags, direct), + uint32_t incr, uint64_t flags, bool immediate), + TP_ARGS(pe, addr, count, incr, flags, immediate), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) __field(u64, flags) - __field(bool, direct) + __field(bool, immediate) ), TP_fast_assign( @@ -340,32 +384,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->count = count; __entry->incr = incr; __entry->flags = flags; - __entry->direct = direct; + __entry->immediate = immediate; ), TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, " - "direct=%d", __entry->pe, __entry->addr, __entry->incr, - __entry->flags, __entry->count, __entry->direct) + "immediate=%d", __entry->pe, __entry->addr, __entry->incr, + __entry->flags, __entry->count, __entry->immediate) ); TRACE_EVENT(amdgpu_vm_copy_ptes, - TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct), - TP_ARGS(pe, src, count, direct), + TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool immediate), + TP_ARGS(pe, src, count, immediate), TP_STRUCT__entry( __field(u64, pe) __field(u64, src) __field(u32, count) - __field(bool, direct) + __field(bool, immediate) ), TP_fast_assign( __entry->pe = pe; __entry->src = src; __entry->count = count; - __entry->direct = direct; + __entry->immediate = immediate; ), - TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d", + TP_printk("pe=%010Lx, src=%010Lx, count=%u, immediate=%d", __entry->pe, __entry->src, __entry->count, - __entry->direct) + __entry->immediate) ); TRACE_EVENT(amdgpu_vm_flush, @@ -380,7 +424,7 @@ TRACE_EVENT(amdgpu_vm_flush, ), TP_fast_assign( - __assign_str(ring, ring->name) + __assign_str(ring, ring->name); __entry->vmid = vmid; __entry->vm_hub = ring->funcs->vmhub; __entry->pd_addr = pd_addr; @@ -481,7 +525,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __assign_str(ring, sched_job->base.sched->name) + __assign_str(ring, sched_job->base.sched->name); __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2616e2eafdeb..c875f1cdd2af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -32,7 +32,6 @@ #include <linux/dma-mapping.h> #include <linux/iommu.h> -#include <linux/hmm.h> #include <linux/pagemap.h> #include <linux/sched/task.h> #include <linux/sched/mm.h> @@ -41,14 +40,14 @@ #include <linux/swap.h> #include <linux/swiotlb.h> #include <linux/dma-buf.h> +#include <linux/sizes.h> +#include <linux/module.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include 
<drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> +#include <drm/ttm/ttm_range_manager.h> -#include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -57,79 +56,26 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" #include "amdgpu_ras.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" -static int amdgpu_map_buffer(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, unsigned num_pages, - uint64_t offset, unsigned window, - struct amdgpu_ring *ring, - uint64_t *addr); +MODULE_IMPORT_NS(DMA_BUF); -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); -static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); +#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 -static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) -{ - return 0; -} +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem); +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, + struct ttm_tt *ttm); -/** - * amdgpu_init_mem_type - Initialize a memory manager for a specific type of - * memory request. - * - * @bdev: The TTM BO device object (contains a reference to amdgpu_device) - * @type: The type of memory requested - * @man: The memory type manager for each domain - * - * This is called by ttm_bo_init_mm() when a buffer object is being - * initialized. - */ -static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) +static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, + unsigned int type, + uint64_t size_in_page) { - struct amdgpu_device *adev; - - adev = amdgpu_ttm_adev(bdev); - - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_TT: - /* GTT memory */ - man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->gmc.gart_start; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; - break; - case TTM_PL_VRAM: - /* "On-card" video ram */ - man->func = &amdgpu_vram_mgr_func; - man->gpu_offset = adev->gmc.vram_start; - man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - break; - case AMDGPU_PL_GDS: - case AMDGPU_PL_GWS: - case AMDGPU_PL_OA: - /* On-chip GDS memory*/ - man->func = &ttm_bo_manager_func; - man->gpu_offset = 0; - man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; + return ttm_range_man_init(&adev->mman.bdev, type, + false, size_in_page); } /** @@ -148,7 +94,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }; /* Don't handle scatter gather BOs */ @@ -168,7 +115,22 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } abo = ttm_to_amdgpu_bo(bo); - switch (bo->mem.mem_type) { + if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) { + struct 
dma_fence *fence; + struct dma_resv *resv = &bo->base._resv; + + rcu_read_lock(); + fence = rcu_dereference(resv->fence_excl); + if (fence && !fence->ops->signaled) + dma_fence_enable_sw_signaling(fence); + + placement->num_placement = 0; + placement->num_busy_placement = 0; + rcu_read_unlock(); + return; + } + + switch (bo->resource->mem_type) { case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: @@ -190,17 +152,20 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, * BOs to be evicted from VRAM */ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT); + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { /* Move to GTT memory */ - amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); } break; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: default: amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); break; @@ -209,206 +174,191 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } /** - * amdgpu_verify_access - Verify access for a mmap call + * amdgpu_ttm_map_buffer - Map memory into the GART windows + * @bo: buffer object to map + * @mem: memory object to map + * @mm_cur: range to map + * @num_pages: number of pages to map + * @window: which GART window to use + * @ring: DMA ring to use for the copy + * @tmz: if we should setup a TMZ enabled mapping + * @addr: resulting address inside the MC address space * - * @bo: The buffer object to map - * @filp: The file pointer from the process performing the mmap - * - * This is called by ttm_bo_mmap() to verify whether a process - * has the right to mmap a BO to their process space. + * Setup one of the GART windows to access a specific piece of memory or return + * the physical address for local memory. */ -static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) +static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, + struct ttm_resource *mem, + struct amdgpu_res_cursor *mm_cur, + unsigned num_pages, unsigned window, + struct amdgpu_ring *ring, bool tmz, + uint64_t *addr) { - struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); + struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + unsigned num_dw, num_bytes; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + void *cpu_addr; + uint64_t flags; + unsigned int i; + int r; - /* - * Don't verify access for KFD BOs. They don't have a GEM - * object associated with them. - */ - if (abo->kfd_bo) + BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < + AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT); + + /* Map only what can't be accessed directly */ + if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) { + *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) + + mm_cur->start; return 0; + } - if (amdgpu_ttm_tt_get_usermm(bo->ttm)) - return -EPERM; - return drm_vma_node_verify_access(&abo->tbo.base.vma_node, - filp->private_data); -} + *addr = adev->gmc.gart_start; + *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE; + *addr += mm_cur->start & ~PAGE_MASK; -/** - * amdgpu_move_null - Register memory for a buffer object - * - * @bo: The bo to assign the memory to - * @new_mem: The memory to be assigned. 
- * - * Assign the memory from new_mem to the memory of the buffer object bo. - */ -static void amdgpu_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); + num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, + AMDGPU_IB_POOL_DELAYED, &job); + if (r) + return r; -/** - * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer. - * - * @bo: The bo to assign the memory to. - * @mm_node: Memory manager node for drm allocator. - * @mem: The region where the bo resides. - * - */ -static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, - struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem) -{ - uint64_t addr = 0; + src_addr = num_dw * 4; + src_addr += job->ibs[0].gpu_addr; - if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) { - addr = mm_node->start << PAGE_SHIFT; - addr += bo->bdev->man[mem->mem_type].gpu_offset; - } - return addr; -} + dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, + dst_addr, num_bytes, false); -/** - * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to - * @offset. It also modifies the offset to be within the drm_mm_node returned - * - * @mem: The region where the bo resides. - * @offset: The offset that drm_mm_node is used for finding. - * - */ -static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, - unsigned long *offset) -{ - struct drm_mm_node *mm_node = mem->mm_node; + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem); + if (tmz) + flags |= AMDGPU_PTE_TMZ; + + cpu_addr = &job->ibs[0].ptr[num_dw]; + + if (mem->mem_type == TTM_PL_TT) { + dma_addr_t *dma_addr; + + dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; + r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, + cpu_addr); + if (r) + goto error_free; + } else { + dma_addr_t dma_address; - while (*offset >= (mm_node->size << PAGE_SHIFT)) { - *offset -= (mm_node->size << PAGE_SHIFT); - ++mm_node; + dma_address = mm_cur->start; + dma_address += adev->vm_manager.vram_base_offset; + + for (i = 0; i < num_pages; ++i) { + r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, + &dma_address, flags, cpu_addr); + if (r) + goto error_free; + + dma_address += PAGE_SIZE; + } } - return mm_node; + + r = amdgpu_job_submit(job, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) + goto error_free; + + dma_fence_put(fence); + + return r; + +error_free: + amdgpu_job_free(job); + return r; } /** - * amdgpu_copy_ttm_mem_to_mem - Helper function for copy + * amdgpu_ttm_copy_mem_to_mem - Helper function for copy + * @adev: amdgpu device + * @src: buffer/address where to read from + * @dst: buffer/address where to write to + * @size: number of bytes to copy + * @tmz: if a secure copy should be used + * @resv: resv object to sync to + * @f: Returns the last fence if multiple jobs are submitted. * * The function copies @size bytes from {src->mem + src->offset} to * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a * move and different for a BO to BO copy. * - * @f: Returns the last fence if multiple jobs are submitted. 
*/ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - struct amdgpu_copy_mem *src, - struct amdgpu_copy_mem *dst, - uint64_t size, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f) { + const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE); + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct drm_mm_node *src_mm, *dst_mm; - uint64_t src_node_start, dst_node_start, src_node_size, - dst_node_size, src_page_offset, dst_page_offset; + struct amdgpu_res_cursor src_mm, dst_mm; struct dma_fence *fence = NULL; int r = 0; - const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * - AMDGPU_GPU_PAGE_SIZE); if (!adev->mman.buffer_funcs_enabled) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } - src_mm = amdgpu_find_mm_node(src->mem, &src->offset); - src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + - src->offset; - src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; - src_page_offset = src_node_start & (PAGE_SIZE - 1); - - dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset); - dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + - dst->offset; - dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; - dst_page_offset = dst_node_start & (PAGE_SIZE - 1); + amdgpu_res_first(src->mem, src->offset, size, &src_mm); + amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm); mutex_lock(&adev->mman.gtt_window_lock); - - while (size) { - unsigned long cur_size; - uint64_t from = src_node_start, to = dst_node_start; + while (src_mm.remaining) { + uint32_t src_page_offset = src_mm.start & ~PAGE_MASK; + uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK; struct dma_fence *next; + uint32_t cur_size; + uint64_t from, to; /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst * begins at an offset, then adjust the size accordingly */ - cur_size = min3(min(src_node_size, dst_node_size), size, - GTT_MAX_BYTES); - if (cur_size + src_page_offset > GTT_MAX_BYTES || - cur_size + dst_page_offset > GTT_MAX_BYTES) - cur_size -= max(src_page_offset, dst_page_offset); - - /* Map only what needs to be accessed. Map src to window 0 and - * dst to window 1 - */ - if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) { - r = amdgpu_map_buffer(src->bo, src->mem, - PFN_UP(cur_size + src_page_offset), - src_node_start, 0, ring, - &from); - if (r) - goto error; - /* Adjust the offset because amdgpu_map_buffer returns - * start of mapped page - */ - from += src_page_offset; - } + cur_size = max(src_page_offset, dst_page_offset); + cur_size = min(min3(src_mm.size, dst_mm.size, size), + (uint64_t)(GTT_MAX_BYTES - cur_size)); + + /* Map src to window 0 and dst to window 1. 
*/ + r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm, + PFN_UP(cur_size + src_page_offset), + 0, ring, tmz, &from); + if (r) + goto error; - if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) { - r = amdgpu_map_buffer(dst->bo, dst->mem, - PFN_UP(cur_size + dst_page_offset), - dst_node_start, 1, ring, - &to); - if (r) - goto error; - to += dst_page_offset; - } + r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm, + PFN_UP(cur_size + dst_page_offset), + 1, ring, tmz, &to); + if (r) + goto error; r = amdgpu_copy_buffer(ring, from, to, cur_size, - resv, &next, false, true); + resv, &next, false, true, tmz); if (r) goto error; dma_fence_put(fence); fence = next; - size -= cur_size; - if (!size) - break; - - src_node_size -= cur_size; - if (!src_node_size) { - src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm, - src->mem); - src_node_size = (src_mm->size << PAGE_SHIFT); - src_page_offset = 0; - } else { - src_node_start += cur_size; - src_page_offset = src_node_start & (PAGE_SIZE - 1); - } - dst_node_size -= cur_size; - if (!dst_node_size) { - dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm, - dst->mem); - dst_node_size = (dst_mm->size << PAGE_SHIFT); - dst_page_offset = 0; - } else { - dst_node_start += cur_size; - dst_page_offset = dst_node_start & (PAGE_SIZE - 1); - } + amdgpu_res_next(&src_mm, cur_size); + amdgpu_res_next(&dst_mm, cur_size); } error: mutex_unlock(&adev->mman.gtt_window_lock); @@ -418,18 +368,19 @@ error: return r; } -/** +/* * amdgpu_move_blit - Copy an entire buffer to another buffer * * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to * help move buffers to and from VRAM. */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + bool evict, + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_copy_mem src, dst; struct dma_fence *fence = NULL; int r; @@ -443,14 +394,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, new_mem->num_pages << PAGE_SHIFT, + amdgpu_bo_encrypted(abo), bo->base.resv, &fence); if (r) goto error; /* clear the space being freed */ if (old_mem->mem_type == TTM_PL_VRAM && - (ttm_to_amdgpu_bo(bo)->flags & - AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { + (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { struct dma_fence *wipe_fence = NULL; r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON, @@ -465,9 +416,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, /* Always block for VM page tables before committing the new location */ if (bo->type == ttm_bo_type_kernel) - r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem); else - r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); + r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem); dma_fence_put(fence); return r; @@ -478,118 +429,16 @@ error: return r; } -/** - * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer - * - * Called by amdgpu_bo_move(). 
- */ -static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - /* create space/pages for new_mem in GTT space */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit from VRAM\n"); - return r; - } - - /* set caching flags */ - r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); - if (unlikely(r)) { - goto out_cleanup; - } - - /* Bind the memory to the GTT space */ - r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); - if (unlikely(r)) { - goto out_cleanup; - } - - /* blit VRAM to GTT */ - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* move BO (in tmp_mem) to new_mem */ - r = ttm_bo_move_ttm(bo, ctx, new_mem); -out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); - return r; -} - -/** - * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM - * - * Called by amdgpu_bo_move(). - */ -static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) -{ - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - /* make space in GTT for old_mem buffer */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit to VRAM\n"); - return r; - } - - /* move/bind old memory to GTT space */ - r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* copy to VRAM */ - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); - return r; -} - -/** +/* * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy * * Called by amdgpu_bo_move() */ static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { - struct drm_mm_node *nodes = mem->mm_node; + uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT; + struct amdgpu_res_cursor cursor; if (mem->mem_type == TTM_PL_SYSTEM || mem->mem_type == TTM_PL_TT) @@ -597,47 +446,67 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, if (mem->mem_type != TTM_PL_VRAM) return false; - /* ttm_mem_reg_ioremap only supports contiguous memory */ - if (nodes->size != mem->num_pages) + amdgpu_res_first(mem, 0, mem_size, &cursor); + + /* ttm_resource_ioremap only supports contiguous memory */ + if (cursor.size != mem_size) return false; - return ((nodes->start + nodes->size) << PAGE_SHIFT) - <= adev->gmc.visible_vram_size; + return cursor.start + cursor.size <= adev->gmc.visible_vram_size; } 
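Both amdgpu_mem_visible() above and the copy loop in amdgpu_ttm_copy_mem_to_mem() rely on the new amdgpu_res_cursor helpers (amdgpu_res_first()/amdgpu_res_next() from amdgpu_res_cursor.h) instead of walking drm_mm_node arrays by hand. A minimal sketch of the iteration pattern, assuming a hypothetical per-chunk consumer; the cursor fields are the same start/size/remaining ones consumed above:

	static void example_handle_chunk(uint64_t start, uint64_t size); /* hypothetical */

	/* Visit a ttm_resource as a series of physically contiguous chunks. */
	static void example_walk_resource(struct ttm_resource *mem, uint64_t bytes)
	{
		struct amdgpu_res_cursor cursor;

		amdgpu_res_first(mem, 0, bytes, &cursor);
		while (cursor.remaining) {
			/* Never hand out more than the current contiguous block. */
			uint64_t chunk = min_t(uint64_t, cursor.size, SZ_2M);

			example_handle_chunk(cursor.start, chunk);
			amdgpu_res_next(&cursor, chunk);
		}
	}

The 2 MiB cap here is arbitrary; the real users above cap each step at GTT_MAX_BYTES so a chunk always fits one GART transfer window.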
-/** +/* * amdgpu_bo_move - Move a buffer object to a new memory location * * Called by ttm_bo_handle_move_mem() */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; int r; + if (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT) { + r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); + if (r) + return r; + } + /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); - if (WARN_ON_ONCE(abo->pin_count > 0)) + if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) return -EINVAL; adev = amdgpu_ttm_adev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - amdgpu_move_null(bo, new_mem); - return 0; + ttm_bo_move_null(bo, new_mem); + goto out; } - if ((old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) || - (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT)) { - /* bind is enough */ - amdgpu_move_null(bo, new_mem); - return 0; + if (old_mem->mem_type == TTM_PL_SYSTEM && + (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT)) { + ttm_bo_move_null(bo, new_mem); + goto out; } + if ((old_mem->mem_type == TTM_PL_TT || + old_mem->mem_type == AMDGPU_PL_PREEMPT) && + new_mem->mem_type == TTM_PL_SYSTEM) { + r = ttm_bo_wait_ctx(bo, ctx); + if (r) + return r; + + amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, new_mem); + goto out; + } + if (old_mem->mem_type == AMDGPU_PL_GDS || old_mem->mem_type == AMDGPU_PL_GWS || old_mem->mem_type == AMDGPU_PL_OA || @@ -645,28 +514,37 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_GWS || new_mem->mem_type == AMDGPU_PL_OA) { /* Nothing to save here */ - amdgpu_move_null(bo, new_mem); - return 0; + ttm_bo_move_null(bo, new_mem); + goto out; } - if (!adev->mman.buffer_funcs_enabled) { - r = -ENODEV; - goto memcpy; + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. 
+ */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); + if (adev->mman.buffer_funcs_enabled) { + if (((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM))) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = TTM_PL_FLAG_TEMPORARY; + return -EMULTIHOP; + } + + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); } else { - r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, - new_mem, old_mem); + r = -ENODEV; } if (r) { -memcpy: /* Check that all memory is CPU accessible */ if (!amdgpu_mem_visible(adev, old_mem) || !amdgpu_mem_visible(adev, new_mem)) { @@ -679,59 +557,43 @@ memcpy: return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. - */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - +out: /* update statistics */ - atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); + atomic64_add(bo->base.size, &adev->num_bytes_moved); + amdgpu_bo_move_notify(bo, evict, new_mem); return 0; } -/** +/* * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, + struct ttm_resource *mem) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct drm_mm_node *mm_node = mem->mm_node; - - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - mem->bus.base = 0; - mem->bus.is_iomem = false; - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ - if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size) + if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) return -EINVAL; - /* Only physically contiguous buffers apply. In a contiguous - * buffer, size of the first mm_node would match the number of - * pages in ttm_mem_reg. 
- */ + if (adev->mman.aper_base_kaddr && - (mm_node->size == mem->num_pages)) + mem->placement & TTM_PL_FLAG_CONTIGUOUS) mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + mem->bus.offset; - mem->bus.base = adev->gmc.aper_base; + mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; break; default: @@ -740,51 +602,56 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ return 0; } -static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct amdgpu_res_cursor cursor; + + amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, + &cursor); + return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } -static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, - unsigned long page_offset) +/** + * amdgpu_ttm_domain_start - Returns GPU start address + * @adev: amdgpu device object + * @type: type of the memory + * + * Returns: + * GPU start address of a memory domain + */ + +uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type) { - struct drm_mm_node *mm; - unsigned long offset = (page_offset << PAGE_SHIFT); + switch (type) { + case TTM_PL_TT: + return adev->gmc.gart_start; + case TTM_PL_VRAM: + return adev->gmc.vram_start; + } - mm = amdgpu_find_mm_node(&bo->mem, &offset); - return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + - (offset >> PAGE_SHIFT); + return 0; } /* * TTM backend functions. */ struct amdgpu_ttm_tt { - struct ttm_dma_tt ttm; + struct ttm_tt ttm; struct drm_gem_object *gobj; u64 offset; uint64_t userptr; struct task_struct *usertask; uint32_t userflags; + bool bound; #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) struct hmm_range *range; #endif }; #ifdef CONFIG_DRM_AMDGPU_USERPTR -/* flags used by HMM internal, not related to CPU/GPU PTE flags */ -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { - (1 << 0), /* HMM_PFN_VALID */ - (1 << 1), /* HMM_PFN_WRITE */ - 0 /* HMM_PFN_DEVICE_PRIVATE */ -}; - -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ - 0, /* HMM_PFN_NONE */ - 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ -}; - -/** +/* * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user * memory and start HMM tracking CPU page table update * @@ -797,10 +664,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long start = gtt->userptr; struct vm_area_struct *vma; - struct hmm_range *range; - unsigned long timeout; struct mm_struct *mm; - unsigned long i; + bool readonly; int r = 0; mm = bo->notifier.mm; @@ -816,31 +681,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; - range = kzalloc(sizeof(*range), GFP_KERNEL); - if (unlikely(!range)) { - r = -ENOMEM; - goto out; - } - range->notifier = &bo->notifier; - range->flags = hmm_range_flags; - range->values = hmm_range_values; - range->pfn_shift = PAGE_SHIFT; - range->start = bo->notifier.interval_tree.start; - range->end = bo->notifier.interval_tree.last + 1; - range->default_flags = hmm_range_flags[HMM_PFN_VALID]; - if (!amdgpu_ttm_tt_is_readonly(ttm)) - range->default_flags |= range->flags[HMM_PFN_WRITE]; - - range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns), - 
GFP_KERNEL); - if (unlikely(!range->pfns)) { - r = -ENOMEM; - goto out_free_ranges; - } - - down_read(&mm->mmap_sem); - vma = find_vma(mm, start); - if (unlikely(!vma || start < vma->vm_start)) { + mmap_read_lock(mm); + vma = vma_lookup(mm, start); + if (unlikely(!vma)) { r = -EFAULT; goto out_unlock; } @@ -849,54 +692,22 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) r = -EPERM; goto out_unlock; } - up_read(&mm->mmap_sem); - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); -retry: - range->notifier_seq = mmu_interval_read_begin(&bo->notifier); - - down_read(&mm->mmap_sem); - r = hmm_range_fault(range, 0); - up_read(&mm->mmap_sem); - if (unlikely(r <= 0)) { - /* - * FIXME: This timeout should encompass the retry from - * mmu_interval_read_retry() as well. - */ - if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout)) - goto retry; - goto out_free_pfns; - } - - for (i = 0; i < ttm->num_pages; i++) { - /* FIXME: The pages cannot be touched outside the notifier_lock */ - pages[i] = hmm_device_entry_to_page(range, range->pfns[i]); - if (unlikely(!pages[i])) { - pr_err("Page fault failed for pfn[%lu] = 0x%llx\n", - i, range->pfns[i]); - r = -ENOMEM; - - goto out_free_pfns; - } - } + readonly = amdgpu_ttm_tt_is_readonly(ttm); + r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, + ttm->num_pages, &gtt->range, readonly, + true, NULL); +out_unlock: + mmap_read_unlock(mm); + if (r) + pr_debug("failed %d to get user pages 0x%lx\n", r, start); - gtt->range = range; mmput(mm); - return 0; - -out_unlock: - up_read(&mm->mmap_sem); -out_free_pfns: - kvfree(range->pfns); -out_free_ranges: - kfree(range); -out: - mmput(mm); return r; } -/** +/* * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change * Check if the pages backing this ttm range have been invalidated * @@ -910,10 +721,10 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) if (!gtt || !gtt->userptr) return false; - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n", + DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", gtt->userptr, ttm->num_pages); - WARN_ONCE(!gtt->range || !gtt->range->pfns, + WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, "No user pages to check\n"); if (gtt->range) { @@ -921,10 +732,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) /* * FIXME: Must always hold notifier_lock for this, and must * not ignore the return code. */ - r = mmu_interval_read_retry(gtt->range->notifier, - gtt->range->notifier_seq); - kvfree(gtt->range->pfns); - kfree(gtt->range); + r = amdgpu_hmm_range_get_pages_done(gtt->range); gtt->range = NULL; } @@ -932,7 +740,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) } #endif -/** +/* * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary. * * Called by amdgpu_cs_list_validate(). This creates the page list * @@ -947,65 +755,63 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) ttm->pages[i] = pages ?
pages[i] : NULL; } -/** - * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages +/* + * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages * * Called by amdgpu_ttm_backend_bind() **/ -static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) +static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - unsigned nents; - int r; - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + int r; /* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, - ttm->num_pages << PAGE_SHIFT, + (u64)ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; /* Map SG to device */ - r = -ENOMEM; - nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - if (nents != ttm->sg->nents) + r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); + if (r) goto release_sg; /* convert SG to linear array of pages and dma addresses */ - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } -/** +/* * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages */ -static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; /* double check that we don't free the table twice */ - if (!ttm->sg->sgl) + if (!ttm->sg || !ttm->sg->sgl) return; /* unmap the pages mapped to the device */ - dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); sg_free_table(ttm->sg); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) @@ -1014,8 +820,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; i++) { if (ttm->pages[i] != - hmm_device_entry_to_page(gtt->range, - gtt->range->pfns[i])) + hmm_pfn_to_page(gtt->range->hmm_pfns[i])) break; } @@ -1024,7 +829,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) #endif } -int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, +static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct ttm_buffer_object *tbo, uint64_t flags) { @@ -1033,59 +838,87 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; - if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + if (amdgpu_bo_encrypted(abo)) + flags |= AMDGPU_PTE_TMZ; + + if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) goto gart_bind_fail; - /* Patch mtype of the second part BO */ + /* The memory type of the first page defaults to UC. Now + * modify the memory type to NC from the second page of + * the BO onward. 
+ */ flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); r = amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), ttm->num_pages - page_idx, - &ttm->pages[page_idx], &(gtt->ttm.dma_address[page_idx]), flags); } else { r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); } gart_bind_fail: if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); return r; } -/** +/* * amdgpu_ttm_backend_bind - Bind GTT memory * * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). * This handles binding GTT memory to the device address space. */ -static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; uint64_t flags; int r = 0; + if (!bo_mem) + return -EINVAL; + + if (gtt->bound) + return 0; + if (gtt->userptr) { - r = amdgpu_ttm_tt_pin_userptr(ttm); + r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { DRM_ERROR("failed to pin userptr\n"); return r; } + } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); } + if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } @@ -1094,7 +927,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { + if (bo_mem->mem_type != TTM_PL_TT || + !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; return 0; } @@ -1105,74 +939,76 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); + gtt->bound = true; return r; } -/** - * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object +/* + * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either + * through AGP or GART aperture. + * + * If bo is accessible through AGP aperture, then use AGP aperture + * to access bo; otherwise allocate logical space in GART aperture + * and map bo to GART aperture. 
*/ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; - struct ttm_mem_reg tmp; + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; struct ttm_placement placement; struct ttm_place placements; + struct ttm_resource *tmp; uint64_t addr, flags; int r; - if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) + if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) return 0; addr = amdgpu_gmc_agp_addr(bo); if (addr != AMDGPU_BO_INVALID_OFFSET) { - bo->mem.start = addr >> PAGE_SHIFT; - } else { + bo->resource->start = addr >> PAGE_SHIFT; + return 0; + } - /* allocate GART space */ - tmp = bo->mem; - tmp.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | - TTM_PL_FLAG_TT; - - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); - if (unlikely(r)) - return r; + /* allocate GART space */ + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; + placements.mem_type = TTM_PL_TT; + placements.flags = bo->resource->placement; - /* compute PTE flags for this buffer object */ - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); + if (unlikely(r)) + return r; - /* Bind pages */ - gtt->offset = (u64)tmp.start << PAGE_SHIFT; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_bo_mem_put(bo, &tmp); - return r; - } + /* compute PTE flags for this buffer object */ + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); - ttm_bo_mem_put(bo, &bo->mem); - bo->mem = tmp; + /* Bind pages */ + gtt->offset = (u64)tmp->start << PAGE_SHIFT; + r = amdgpu_ttm_gart_bind(adev, bo, flags); + if (unlikely(r)) { + ttm_resource_free(bo, &tmp); + return r; } - bo->offset = (bo->mem.start << PAGE_SHIFT) + - bo->bdev->man[bo->mem.mem_type].gpu_offset; + amdgpu_gart_invalidate_tlb(adev); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, tmp); return 0; } -/** +/* * amdgpu_ttm_recover_gart - Rebind GTT pages * * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to @@ -1187,199 +1023,191 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) if (!tbo->ttm) return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); r = amdgpu_ttm_gart_bind(adev, tbo, flags); return r; } -/** +/* * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages * * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and * ttm_tt_destroy(). 
*/ -static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, + struct ttm_tt *ttm) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; /* if the pages have userptr pinning then clear that first */ - if (gtt->userptr) - amdgpu_ttm_tt_unpin_userptr(ttm); + if (gtt->userptr) { + amdgpu_ttm_tt_unpin_userptr(bdev, ttm); + } else if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + } + + if (!gtt->bound) + return; if (gtt->offset == AMDGPU_BO_INVALID_OFFSET) - return 0; + return; /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); if (r) - DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", - gtt->ttm.ttm.num_pages, gtt->offset); - return r; + DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", + gtt->ttm.num_pages, gtt->offset); + gtt->bound = false; } -static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) +static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; if (gtt->usertask) put_task_struct(gtt->usertask); - ttm_dma_tt_fini(&gtt->ttm); + ttm_tt_fini(&gtt->ttm); kfree(gtt); } -static struct ttm_backend_func amdgpu_backend_func = { - .bind = &amdgpu_ttm_backend_bind, - .unbind = &amdgpu_ttm_backend_unbind, - .destroy = &amdgpu_ttm_backend_destroy, -}; - /** * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO * * @bo: The buffer object to create a GTT ttm_tt object around + * @page_flags: Page flags to be added to the ttm_tt object * * Called by ttm_tt_create(). */ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; + enum ttm_caching caching; gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } - gtt->ttm.ttm.func = &amdgpu_backend_func; gtt->gobj = &bo->base; + if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + caching = ttm_write_combined; + else + caching = ttm_cached; + /* allocate space for the uninitialized page entries */ - if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) { + if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } - return &gtt->ttm.ttm; + return &gtt->ttm; } -/** +/* * amdgpu_ttm_tt_populate - Map GTT pages visible to the device * * Map the pages of a ttm_tt object to an address space visible * to the underlying device.
*/ -static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx) +static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) { - struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; + pgoff_t i; + int ret; /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ - if (gtt && gtt->userptr) { + if (gtt->userptr) { ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; - - ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm->state = tt_unbound; return 0; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) { - if (!ttm->sg) { - struct dma_buf_attachment *attach; - struct sg_table *sgt; - - attach = gtt->gobj->import_attach; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - ttm->sg = sgt; - } - - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, - ttm->num_pages); - ttm->state = tt_unbound; + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return 0; - } -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - return ttm_dma_populate(&gtt->ttm, adev->dev, ctx); - } -#endif + ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); + if (ret) + return ret; - /* fall back to generic helper to populate the page array - * and map them to the device */ - return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx); + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = bdev->dev_mapping; + + return 0; } -/** +/* * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays * * Unmaps pages of a ttm_tt object from the device address space and * unpopulates the page array backing it. */ -static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) +static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, + struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_device *adev; + pgoff_t i; + + amdgpu_ttm_backend_unbind(bdev, ttm); - if (gtt && gtt->userptr) { + if (gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); - ttm->page_flags &= ~TTM_PAGE_FLAG_SG; - return; - } - - if (ttm->sg && gtt->gobj->import_attach) { - struct dma_buf_attachment *attach; - - attach = gtt->gobj->import_attach; - dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); ttm->sg = NULL; return; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return; - adev = amdgpu_ttm_adev(ttm->bdev); - -#ifdef CONFIG_SWIOTLB - if (adev->need_swiotlb && swiotlb_nr_tbl()) { - ttm_dma_unpopulate(&gtt->ttm, adev->dev); - return; - } -#endif + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = NULL; - /* fall back to generic helper to unmap and unpopulate array */ - ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm); + adev = amdgpu_ttm_adev(bdev); + return ttm_pool_free(&adev->mman.bdev.pool, ttm); } /** * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current * task * - * @ttm: The ttm_tt object to bind this userptr object to + * @bo: The ttm_buffer_object to bind this userptr to * @addr: The address in the current tasks VM space to use * @flags: Requirements of userptr object.
* * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages * to current task */ -int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, - uint32_t flags) +int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, + uint64_t addr, uint32_t flags) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt; - if (gtt == NULL) - return -EINVAL; + if (!bo->ttm) { + /* TODO: We want a separate TTM object type for userptrs */ + bo->ttm = amdgpu_ttm_tt_create(bo, 0); + if (bo->ttm == NULL) + return -ENOMEM; + } + + /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */ + bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL; + gtt = (void *)bo->ttm; gtt->userptr = addr; gtt->userflags = flags; @@ -1391,7 +1219,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -/** +/* * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object */ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) @@ -1407,13 +1235,13 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) return gtt->usertask->mm; } -/** +/* * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an * address range for the current task. * */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end) + unsigned long end, unsigned long *userptr) { struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long size; @@ -1424,14 +1252,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, /* Return false if no part of the ttm_tt object lies within * the range */ - size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) return false; + if (userptr) + *userptr = gtt->userptr; return true; } -/** +/* * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr? */ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) @@ -1444,7 +1274,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) return true; } -/** +/* * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? */ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) @@ -1465,33 +1295,39 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) * * Figure out the flags to use for a VM PDE (Page Directory Entry). */ -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) { uint64_t flags = 0; if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) { + if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT)) { flags |= AMDGPU_PTE_SYSTEM; - if (ttm->caching_state == tt_cached) + if (ttm->caching == ttm_cached) flags |= AMDGPU_PTE_SNOOPED; } + if (mem && mem->mem_type == TTM_PL_VRAM && + mem->bus.caching == ttm_cached) + flags |= AMDGPU_PTE_SNOOPED; + return flags; } /** * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object * + * @adev: amdgpu_device pointer * @ttm: The ttm_tt object to compute the flags for * @mem: The memory registry backing this ttm_tt object - + * * Figure out the flags to use for a VM PTE (Page Table Entry). 
*/ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); @@ -1504,7 +1340,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } -/** +/* * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer * object. * @@ -1516,24 +1352,25 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { - unsigned long num_pages = bo->mem.num_pages; - struct drm_mm_node *node = bo->mem.mm_node; + unsigned long num_pages = bo->resource->num_pages; + struct amdgpu_res_cursor cursor; struct dma_resv_list *flist; struct dma_fence *f; int i; - /* Don't evict VM page tables while they are busy, otherwise we can't - * cleanly handle page faults. - */ + /* Swapout? */ + if (bo->resource->mem_type == TTM_PL_SYSTEM) + return true; + if (bo->type == ttm_bo_type_kernel && - !dma_resv_test_signaled_rcu(bo->base.resv, true)) + !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) return false; /* If bo is a KFD BO, check if the bo belongs to the current process. * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_get_list(bo->base.resv); + flist = dma_resv_shared_list(bo->base.resv); if (flist) { for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], @@ -1543,19 +1380,33 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, } } - switch (bo->mem.mem_type) { + switch (bo->resource->mem_type) { + case AMDGPU_PL_PREEMPT: + /* Preemptible BOs don't own system resources managed by the + * driver (pages, VRAM, GART space). They point to resources + * owned by someone else (e.g. pageable memory in user mode + * or a DMABuf). They are used in a preemptible context so we + * can guarantee no deadlocks and good QoS in case of MMU + * notifiers or DMABuf move notifiers from the resource owner. 
+ */ + return false; case TTM_PL_TT: + if (amdgpu_bo_is_amdgpu_bo(bo) && + amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) + return false; return true; case TTM_PL_VRAM: /* Check each drm MM node individually */ - while (num_pages) { - if (place->fpfn < (node->start + node->size) && - !(place->lpfn && place->lpfn <= node->start)) + amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT, + &cursor); + while (cursor.remaining) { + if (place->fpfn < PFN_DOWN(cursor.start + cursor.size) + && !(place->lpfn && + place->lpfn <= PFN_DOWN(cursor.start))) return true; - num_pages -= node->size; - ++node; + amdgpu_res_next(&cursor, cursor.size); } return false; @@ -1566,6 +1417,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return ttm_bo_eviction_valuable(bo, place); } +static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) +{ + while (size) { + uint64_t aligned_pos = ALIGN_DOWN(pos, 4); + uint64_t bytes = 4 - (pos & 0x3); + uint32_t shift = (pos & 0x3) * 8; + uint32_t mask = 0xffffffff << shift; + uint32_t value = 0; + + if (size < bytes) { + mask &= 0xffffffff >> (bytes - size) * 8; + bytes = size; + } + + if (mask != 0xffffffff) { + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true); + } else { + value = (value & mask) >> shift; + memcpy(buf, &value, bytes); + } + } else { + amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write); + } + + pos += bytes; + buf += bytes; + size -= bytes; + } +} + /** * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. * @@ -1579,78 +1465,56 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * access for debugging purposes. 
*/ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, - unsigned long offset, - void *buf, int len, int write) + unsigned long offset, void *buf, int len, + int write) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); - struct drm_mm_node *nodes; - uint32_t value = 0; + struct amdgpu_res_cursor cursor; int ret = 0; - uint64_t pos; - unsigned long flags; - if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; - nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); - pos = (nodes->start << PAGE_SHIFT) + offset; - - while (len && pos < adev->gmc.mc_vram_size) { - uint64_t aligned_pos = pos & ~(uint64_t)3; - uint32_t bytes = 4 - (pos & 3); - uint32_t shift = (pos & 3) * 8; - uint32_t mask = 0xffffffff << shift; - - if (len < bytes) { - mask &= 0xffffffff >> (bytes - len) * 8; - bytes = len; + amdgpu_res_first(bo->resource, offset, len, &cursor); + while (cursor.remaining) { + size_t count, size = cursor.size; + loff_t pos = cursor.start; + + count = amdgpu_device_aper_access(adev, pos, buf, size, write); + size -= count; + if (size) { + /* use MM to access the rest of VRAM and to handle unaligned addresses */ + pos += count; + buf += count; + amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write); } - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); - if (!write || mask != 0xffffffff) - value = RREG32_NO_KIQ(mmMM_DATA); - if (write) { - value &= ~mask; - value |= (*(uint32_t *)buf << shift) & mask; - WREG32_NO_KIQ(mmMM_DATA, value); - } - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - if (!write) { - value = (value & mask) >> shift; - memcpy(buf, &value, bytes); - } - - ret += bytes; - buf = (uint8_t *)buf + bytes; - pos += bytes; - len -= bytes; - if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { - ++nodes; - pos = (nodes->start << PAGE_SHIFT); - } + ret += cursor.size; + buf += cursor.size; + amdgpu_res_next(&cursor, cursor.size); } return ret; } -static struct ttm_bo_driver amdgpu_bo_driver = { +static void +amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + amdgpu_bo_move_notify(bo, false, NULL); +} + +static struct ttm_device_funcs amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, - .invalidate_caches = &amdgpu_invalidate_caches, - .init_mem_type = &amdgpu_init_mem_type, + .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, - .verify_access = &amdgpu_verify_access, - .move_notify = &amdgpu_bo_move_notify, + .delete_mem_notify = &amdgpu_bo_delete_mem_notify, .release_notify = &amdgpu_bo_release_notify, - .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, - .io_mem_free = &amdgpu_ttm_io_mem_free, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, .access_memory = &amdgpu_ttm_access_memory, .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify @@ -1668,8 +1532,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = { */ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) { - amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, - NULL, &adev->fw_vram_usage.va); + amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo, + NULL, &adev->mman.fw_vram_usage_va); } /** @@ -1683,19
+1547,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { uint64_t vram_size = adev->gmc.visible_vram_size; - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; + adev->mman.fw_vram_usage_va = NULL; + adev->mman.fw_vram_usage_reserved_bo = NULL; - if (adev->fw_vram_usage.size == 0 || - adev->fw_vram_usage.size > vram_size) + if (adev->mman.fw_vram_usage_size == 0 || + adev->mman.fw_vram_usage_size > vram_size) return 0; return amdgpu_bo_create_kernel_at(adev, - adev->fw_vram_usage.start_offset, - adev->fw_vram_usage.size, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); + &adev->mman.fw_vram_usage_reserved_bo, + &adev->mman.fw_vram_usage_va); } /* @@ -1717,70 +1581,90 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); ctx->c2p_bo = NULL; - amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); - ctx->p2c_bo = NULL; - return 0; } -/** - * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training - * - * @adev: amdgpu_device pointer - * - * create bo vram reservation from memory training. - */ -static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) +static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) { - int ret; struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; memset(ctx, 0, sizeof(*ctx)); - if (!adev->fw_vram_usage.mem_train_support) { - DRM_DEBUG("memory training does not support!\n"); - return 0; - } - ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; - ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); - ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; + ctx->c2p_train_data_offset = + ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M); + ctx->p2c_train_data_offset = + (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); + ctx->train_data_size = + GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", - ctx->train_data_size, - ctx->p2c_train_data_offset, - ctx->c2p_train_data_offset); + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); +} - ret = amdgpu_bo_create_kernel_at(adev, - ctx->p2c_train_data_offset, - ctx->train_data_size, - AMDGPU_GEM_DOMAIN_VRAM, - &ctx->p2c_bo, - NULL); - if (ret) { - DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); - goto Err_out; +/* + * reserve TMR memory at the top of VRAM which holds + * IP Discovery data and is protected by PSP. 
+ */ +static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) +{ + int ret; + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; + bool mem_train_support = false; + + if (!amdgpu_sriov_vf(adev)) { + if (amdgpu_atomfirmware_mem_training_supported(adev)) + mem_train_support = true; + else + DRM_DEBUG("memory training is not supported!\n"); } - ret = amdgpu_bo_create_kernel_at(adev, + /* + * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards for all + * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.) + * + * Otherwise, fall back to the legacy approach to check and reserve the TMR block for IP + * discovery data and G6 memory training data, respectively. + */ + adev->mman.discovery_tmr_size = + amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); + if (!adev->mman.discovery_tmr_size) + adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET; + + if (mem_train_support) { + /* reserve VRAM for memory training according to the TMR location */ + amdgpu_ttm_training_data_block_init(adev); + ret = amdgpu_bo_create_kernel_at(adev, ctx->c2p_train_data_offset, ctx->train_data_size, AMDGPU_GEM_DOMAIN_VRAM, &ctx->c2p_bo, NULL); + if (ret) { + DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; + } + ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; + } + + ret = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, + adev->mman.discovery_tmr_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.discovery_memory, + NULL); if (ret) { - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); - goto Err_out; + DRM_ERROR("alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + return ret; } - ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; return 0; - -Err_out: - amdgpu_ttm_training_reserve_vram_fini(adev); - return ret; } -/** +/* * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields.
* @@ -1794,15 +1678,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) uint64_t gtt_size; int r; u64 vis_vram_limit; - void *stolen_vga_buf; mutex_init(&adev->mman.gtt_window_lock); /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&adev->mman.bdev, - &amdgpu_bo_driver, - adev->ddev->anon_inode->i_mapping, - adev->ddev->vma_offset_manager, + r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, + adev_to_drm(adev)->anon_inode->i_mapping, + adev_to_drm(adev)->vma_offset_manager, + adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -1810,12 +1693,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } adev->mman.initialized = true; - /* We opt to avoid OOM on system pages allocations */ - adev->mman.bdev.no_retry = true; - /* Initialize VRAM pool with all of VRAM divided into pages */ - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, - adev->gmc.real_vram_size >> PAGE_SHIFT); + r = amdgpu_vram_mgr_init(adev); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; @@ -1830,8 +1709,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_buffer_funcs_status(adev, false); #ifdef CONFIG_64BIT - adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, - adev->gmc.visible_vram_size); +#ifdef CONFIG_X86 + if (adev->gmc.xgmi.connected_to_cpu) + adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base, + adev->gmc.visible_vram_size); + + else +#endif + adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, + adev->gmc.visible_vram_size); #endif /* @@ -1844,33 +1730,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } /* - *The reserved vram for memory training must be pinned to the specified - *place on the VRAM, so reserve it early. + * Only NAVI10 and onward ASICs support IP discovery. + * If IP discovery is enabled, a block of memory should be + * reserved for IP discovery. */ - r = amdgpu_ttm_training_reserve_vram_init(adev); - if (r) - return r; + if (adev->mman.discovery_bin) { + r = amdgpu_ttm_reserve_tmr(adev); + if (r) + return r; + } /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS * and driver. */ - r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->stolen_vga_memory, - NULL, &stolen_vga_buf); + r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_vga_memory, + NULL); if (r) return r; - - /* - * reserve one TMR (64K) memory at the top of VRAM which holds * IP Discovery data and is protected by PSP.
- */ - r = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, - DISCOVERY_TMR_SIZE, + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, + adev->mman.stolen_extended_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_extended_memory, + NULL); + if (r) + return r; + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, + adev->mman.stolen_reserved_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->discovery_memory, + &adev->mman.stolen_reserved_memory, NULL); if (r) return r; @@ -1892,7 +1782,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) gtt_size = (uint64_t)amdgpu_gtt_size << 20; /* Initialize GTT memory pool */ - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); + r = amdgpu_gtt_mgr_init(adev, gtt_size); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; @@ -1900,48 +1790,36 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize preemptible memory pool */ + r = amdgpu_preempt_mgr_init(adev); + if (r) { + DRM_ERROR("Failed initializing PREEMPT heap.\n"); + return r; + } + /* Initialize various on-chip memory pools */ - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, - adev->gds.gds_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { DRM_ERROR("Failed initializing GDS heap.\n"); return r; } - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, - adev->gds.gws_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); if (r) { DRM_ERROR("Failed initializing gws heap.\n"); return r; } - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, - adev->gds.oa_size); + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); if (r) { DRM_ERROR("Failed initializing oa heap.\n"); return r; } - /* Register debugfs entries for amdgpu_ttm */ - r = amdgpu_ttm_debugfs_init(adev); - if (r) { - DRM_ERROR("Failed to init debugfs\n"); - return r; - } return 0; } -/** - * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm - */ -void amdgpu_ttm_late_init(struct amdgpu_device *adev) -{ - void *stolen_vga_buf; - /* return the VGA stolen memory (if any) back to VRAM */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); -} - -/** +/* * amdgpu_ttm_fini - De-initialize the TTM memory pools */ void amdgpu_ttm_fini(struct amdgpu_device *adev) @@ -1949,22 +1827,24 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; - amdgpu_ttm_debugfs_fini(adev); amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the stolen vga memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); /* return the IP Discovery TMR memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + if (adev->mman.stolen_reserved_size) + amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, + NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); - if (adev->mman.aper_base_kaddr) - iounmap(adev->mman.aper_base_kaddr); - adev->mman.aper_base_kaddr = NULL; - - ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); - ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); - 
ttm_bo_device_release(&adev->mman.bdev); + amdgpu_vram_mgr_fini(adev); + amdgpu_gtt_mgr_fini(adev); + amdgpu_preempt_mgr_fini(adev); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); + ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; DRM_INFO("amdgpu: ttm finalized\n"); } @@ -1980,21 +1860,23 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) */ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) { - struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); uint64_t size; int r; - if (!adev->mman.initialized || adev->in_gpu_reset || + if (!adev->mman.initialized || amdgpu_in_reset(adev) || adev->mman.buffer_funcs_enabled == enable) return; if (enable) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; ring = adev->mman.buffer_funcs_ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->mman.entity, + DRM_SCHED_PRIORITY_KERNEL, &sched, + 1, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", r); @@ -2015,86 +1897,14 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) adev->mman.buffer_funcs_enabled = enable; } -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv = filp->private_data; - struct amdgpu_device *adev = file_priv->minor->dev->dev_private; - - if (adev == NULL) - return -EINVAL; - - return ttm_bo_mmap(filp, vma, &adev->mman.bdev); -} - -static int amdgpu_map_buffer(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, unsigned num_pages, - uint64_t offset, unsigned window, - struct amdgpu_ring *ring, - uint64_t *addr) -{ - struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - struct amdgpu_device *adev = ring->adev; - struct ttm_tt *ttm = bo->ttm; - struct amdgpu_job *job; - unsigned num_dw, num_bytes; - dma_addr_t *dma_address; - struct dma_fence *fence; - uint64_t src_addr, dst_addr; - uint64_t flags; - int r; - - BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < - AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); - - *addr = adev->gmc.gart_start; - *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * - AMDGPU_GPU_PAGE_SIZE; - - num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); - num_bytes = num_pages * 8; - - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); - if (r) - return r; - - src_addr = num_dw * 4; - src_addr += job->ibs[0].gpu_addr; - - dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); - dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; - amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, - dst_addr, num_bytes); - - amdgpu_ring_pad_ib(ring, &job->ibs[0]); - WARN_ON(job->ibs[0].length_dw > num_dw); - - dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT]; - flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); - r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, - &job->ibs[0].ptr[num_dw]); - if (r) - goto error_free; - - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_free; - - dma_fence_put(fence); - - return r; - -error_free: - amdgpu_job_free(job); - return r; -} - int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, - bool vm_needs_flush) + bool vm_needs_flush, bool tmz) { + enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : + AMDGPU_IB_POOL_DELAYED; struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -2112,18 +1922,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job); if (r) return r; if (vm_needs_flush) { - job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); + job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ? + adev->gmc.pdb0_bo : adev->gart.bo); job->vm_needs_flush = true; } if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, - false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; @@ -2134,7 +1945,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint32_t cur_size_in_bytes = min(byte_count, max_bytes); amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, - dst_offset, cur_size_in_bytes); + dst_offset, cur_size_in_bytes, tmz); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; @@ -2168,9 +1979,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct drm_mm_node *mm_node; - unsigned long num_pages; + struct amdgpu_res_cursor cursor; unsigned int num_loops, num_dw; + uint64_t num_bytes; struct amdgpu_job *job; int r; @@ -2180,61 +1991,56 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } - if (bo->tbo.mem.mem_type == TTM_PL_TT) { + if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) { + DRM_ERROR("Trying to clear preemptible memory.\n"); + return -EINVAL; + } + + if (bo->tbo.resource->mem_type == TTM_PL_TT) { r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) return r; } - num_pages = bo->tbo.num_pages; - mm_node = bo->tbo.mem.mm_node; + num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT; num_loops = 0; - while (num_pages) { - uint64_t byte_count = mm_node->size << PAGE_SHIFT; - num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes); - num_pages -= mm_node->size; - ++mm_node; + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); + while (cursor.remaining) { + num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes); + amdgpu_res_next(&cursor, cursor.size); } num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; /* for IB padding */ num_dw += 64; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, + &job); if (r) return r; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; } } - num_pages = bo->tbo.num_pages; - mm_node = bo->tbo.mem.mm_node; - - while (num_pages) { - uint64_t byte_count = mm_node->size << PAGE_SHIFT; - uint64_t dst_addr; - - dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); - while (byte_count) { - uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count, - max_bytes); - - amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, - dst_addr, cur_size_in_bytes); + 
amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); + while (cursor.remaining) { + uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes); + uint64_t dst_addr = cursor.start; - dst_addr += cur_size_in_bytes; - byte_count -= cur_size_in_bytes; - } + dst_addr += amdgpu_ttm_domain_start(adev, + bo->tbo.resource->mem_type); + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, + cur_size); - num_pages -= mm_node->size; - ++mm_node; + amdgpu_res_next(&cursor, cur_size); } amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -2251,34 +2057,108 @@ error_free: return r; } +/** + * amdgpu_ttm_evict_resources - evict memory buffers + * @adev: amdgpu device object + * @mem_type: evicted BO's memory type + * + * Evicts all @mem_type buffers on the lru list of the memory type. + * + * Returns: + * 0 for success or a negative error code on failure. + */ +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) +{ + struct ttm_resource_manager *man; + + switch (mem_type) { + case TTM_PL_VRAM: + case TTM_PL_TT: + case AMDGPU_PL_GWS: + case AMDGPU_PL_GDS: + case AMDGPU_PL_OA: + man = ttm_manager_type(&adev->mman.bdev, mem_type); + break; + default: + DRM_ERROR("Trying to evict invalid memory type\n"); + return -EINVAL; + } + + return ttm_resource_manager_evict_all(&adev->mman.bdev, man); +} + #if defined(CONFIG_DEBUG_FS) -static int amdgpu_mm_dump_table(struct seq_file *m, void *data) +static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - unsigned ttm_pl = (uintptr_t)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + TTM_PL_VRAM); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); return 0; } -static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { - {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM}, - {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT}, - {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS}, - {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS}, - {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA}, - {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, -#ifdef CONFIG_SWIOTLB - {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -#endif -}; +static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; -/** + return ttm_pool_debugfs(&adev->mman.bdev.pool, m); +} + +static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + TTM_PL_TT); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GDS); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct 
amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GWS); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_OA); + struct drm_printer p = drm_seq_file_printer(m); + + man->func->debug(man, &p); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table); +DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); + +/* * amdgpu_ttm_vram_read - Linear read access to VRAM * * Accesses VRAM via MMIO for debugging purposes. @@ -2288,7 +2168,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, { struct amdgpu_device *adev = file_inode(f)->i_private; ssize_t result = 0; - int r; if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -2296,33 +2175,25 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, if (*pos >= adev->gmc.mc_vram_size) return -ENXIO; + size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos)); while (size) { - unsigned long flags; - uint32_t value; - - if (*pos >= adev->gmc.mc_vram_size) - return result; + size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4); + uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ]; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - value = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - - r = put_user(value, (uint32_t *)buf); - if (r) - return r; + amdgpu_device_vram_access(adev, *pos, value, bytes, false); + if (copy_to_user(buf, value, bytes)) + return -EFAULT; - result += 4; - buf += 4; - *pos += 4; - size -= 4; + result += bytes; + buf += bytes; + *pos += bytes; + size -= bytes; } return result; } -/** +/* * amdgpu_ttm_vram_write - Linear write access to VRAM * * Accesses VRAM via MMIO for debugging purposes. 
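The per-placement *_table_show() helpers above all rely on the same seq_file idiom: DEFINE_SHOW_ATTRIBUTE(name) expands to a name_open() wrapper around single_open() plus a const name_fops, so each read-only debugfs file only needs its show callback. A minimal self-contained sketch of that pattern (my_counter and its field are placeholder names, not amdgpu code):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_counter {
	u64 hits;
};

static int my_counter_show(struct seq_file *m, void *unused)
{
	/* single_open() hands back the data pointer given to debugfs below */
	struct my_counter *c = m->private;

	seq_printf(m, "hits: %llu\n", c->hits);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_counter);	/* generates my_counter_open + my_counter_fops */

static void my_counter_debugfs_init(struct dentry *root, struct my_counter *c)
{
	/* 0444: read-only, matching the amdgpu_*_mm entries created below */
	debugfs_create_file("my_counter", 0444, root, c, &my_counter_fops);
}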
@@ -2341,7 +2212,6 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, return -ENXIO; while (size) { - unsigned long flags; uint32_t value; if (*pos >= adev->gmc.mc_vram_size) @@ -2351,11 +2221,7 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, if (r) return r; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - WREG32_NO_KIQ(mmMM_DATA, value); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + amdgpu_device_mm_access(adev, *pos, &value, 4, true); result += 4; buf += 4; @@ -2373,59 +2239,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = { .llseek = default_llseek, }; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - -/** - * amdgpu_ttm_gtt_read - Linear read access to GTT memory - */ -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - while (size) { - loff_t p = *pos / PAGE_SIZE; - unsigned off = *pos & ~PAGE_MASK; - size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); - struct page *page; - void *ptr; - - if (p >= adev->gart.num_cpu_pages) - return result; - - page = adev->gart.pages[p]; - if (page) { - ptr = kmap(page); - ptr += off; - - r = copy_to_user(buf, ptr, cur_size); - kunmap(adev->gart.pages[p]); - } else - r = clear_user(buf, cur_size); - - if (r) - return -EFAULT; - - result += cur_size; - buf += cur_size; - *pos += cur_size; - size -= cur_size; - } - - return result; -} - -static const struct file_operations amdgpu_ttm_gtt_fops = { - .owner = THIS_MODULE, - .read = amdgpu_ttm_gtt_read, - .llseek = default_llseek -}; - -#endif - -/** +/* * amdgpu_iomem_read - Virtual read access to GPU mapped memory * * This function is used to read memory that has been mapped to the @@ -2481,7 +2295,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, return result; } -/** +/* * amdgpu_iomem_write - Virtual write access to GPU mapped memory * * This function is used to write memory that has been mapped to the @@ -2539,62 +2353,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = { .llseek = default_llseek }; -static const struct { - char *name; - const struct file_operations *fops; - int domain; -} ttm_debugfs_entries[] = { - { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, -#endif - { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM }, -}; - #endif -static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) +void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - unsigned count; - - struct drm_minor *minor = adev->ddev->primary; - struct dentry *ent, *root = minor->debugfs_root; - - for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { - ent = debugfs_create_file( - ttm_debugfs_entries[count].name, - S_IFREG | S_IRUGO, root, - adev, - ttm_debugfs_entries[count].fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) - i_size_write(ent->d_inode, adev->gmc.mc_vram_size); - else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) - i_size_write(ent->d_inode, adev->gmc.gart_size); - adev->mman.debugfs_entries[count] = ent; - } - - count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); - -#ifdef CONFIG_SWIOTLB - if (!(adev->need_swiotlb && swiotlb_nr_tbl())) - 
--count; -#endif - - return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); -#else - return 0; -#endif -} - -static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) - debugfs_remove(adev->mman.debugfs_entries[i]); + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file_size("amdgpu_vram", 0444, root, adev, + &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); + debugfs_create_file("amdgpu_iomem", 0444, root, adev, + &amdgpu_ttm_iomem_fops); + debugfs_create_file("amdgpu_vram_mm", 0444, root, adev, + &amdgpu_mm_vram_table_fops); + debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev, + &amdgpu_mm_tt_table_fops); + debugfs_create_file("amdgpu_gds_mm", 0444, root, adev, + &amdgpu_mm_gds_table_fops); + debugfs_create_file("amdgpu_gws_mm", 0444, root, adev, + &amdgpu_mm_gws_table_fops); + debugfs_create_file("amdgpu_oa_mm", 0444, root, adev, + &amdgpu_mm_oa_table_fops); + debugfs_create_file("ttm_page_pool", 0444, root, adev, + &amdgpu_ttm_page_pool_fops); #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0dddedc06ae3..7346ecff4438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -24,32 +24,47 @@ #ifndef __AMDGPU_TTM_H__ #define __AMDGPU_TTM_H__ -#include "amdgpu.h" +#include <linux/dma-direction.h> #include <drm/gpu_scheduler.h> +#include "amdgpu.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) - -#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0) -#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) -#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) +#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 #define AMDGPU_POISON 0xd0bed0be +struct amdgpu_vram_mgr { + struct ttm_resource_manager manager; + struct drm_mm mm; + spinlock_t lock; + struct list_head reservations_pending; + struct list_head reserved_pages; + atomic64_t usage; + atomic64_t vis_usage; +}; + +struct amdgpu_gtt_mgr { + struct ttm_resource_manager manager; + struct drm_mm mm; + spinlock_t lock; + atomic64_t used; +}; + +struct amdgpu_preempt_mgr { + struct ttm_resource_manager manager; + atomic64_t used; +}; + struct amdgpu_mman { - struct ttm_bo_device bdev; - bool mem_global_referenced; + struct ttm_device bdev; bool initialized; void __iomem *aper_base_kaddr; -#if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_entries[8]; -#endif - /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; @@ -58,27 +73,70 @@ struct amdgpu_mman { struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ struct drm_sched_entity entity; + + struct amdgpu_vram_mgr vram_mgr; + struct amdgpu_gtt_mgr gtt_mgr; + struct amdgpu_preempt_mgr preempt_mgr; + + uint64_t stolen_vga_size; + struct amdgpu_bo *stolen_vga_memory; + uint64_t stolen_extended_size; + struct amdgpu_bo *stolen_extended_memory; + bool keep_stolen_vga_memory; + + struct amdgpu_bo *stolen_reserved_memory; + uint64_t stolen_reserved_offset; + uint64_t stolen_reserved_size; + + /* discovery */ + uint8_t *discovery_bin; + uint32_t discovery_tmr_size; + struct amdgpu_bo *discovery_memory; + + /* firmware VRAM reservation */ + u64 fw_vram_usage_start_offset; + u64 
fw_vram_usage_size; + struct amdgpu_bo *fw_vram_usage_reserved_bo; + void *fw_vram_usage_va; }; struct amdgpu_copy_mem { struct ttm_buffer_object *bo; - struct ttm_mem_reg *mem; + struct ttm_resource *mem; unsigned long offset; }; -extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; -extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size); +void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev); +int amdgpu_preempt_mgr_init(struct amdgpu_device *adev); +void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev); +int amdgpu_vram_mgr_init(struct amdgpu_device *adev); +void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); + +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); +uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); +int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, + struct ttm_resource *mem, + u64 offset, u64 size, + struct device *dev, + enum dma_data_direction dir, + struct sg_table **sgt); +void amdgpu_vram_mgr_free_sgt(struct device *dev, + enum dma_data_direction dir, + struct sg_table *sgt); +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); +int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, + uint64_t start, uint64_t size); +int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, + uint64_t start); int amdgpu_ttm_init(struct amdgpu_device *adev); -void amdgpu_ttm_late_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable); @@ -87,11 +145,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, - bool vm_needs_flush); + bool vm_needs_flush, bool tmz); int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - struct amdgpu_copy_mem *src, - struct amdgpu_copy_mem *dst, - uint64_t size, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f); int amdgpu_fill_buffer(struct amdgpu_bo *bo, @@ -99,9 +157,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, struct dma_resv *resv, struct dma_fence **fence); -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); +uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages); @@ -119,18 +177,21 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) #endif void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); -int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, 
uint64_t addr, - uint32_t flags); +int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, + uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end); + unsigned long end, unsigned long *userptr); bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem); +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type); + +void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 833fc4b68940..ca3350502618 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -68,23 +68,32 @@ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr) { uint16_t version_major = le16_to_cpu(hdr->header_version_major); uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + const struct smc_firmware_header_v1_0 *v1_0_hdr; + const struct smc_firmware_header_v2_0 *v2_0_hdr; + const struct smc_firmware_header_v2_1 *v2_1_hdr; DRM_DEBUG("SMC\n"); amdgpu_ucode_print_common_hdr(hdr); if (version_major == 1) { - const struct smc_firmware_header_v1_0 *smc_hdr = - container_of(hdr, struct smc_firmware_header_v1_0, header); - - DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr)); + v1_0_hdr = container_of(hdr, struct smc_firmware_header_v1_0, header); + DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(v1_0_hdr->ucode_start_addr)); } else if (version_major == 2) { - const struct smc_firmware_header_v1_0 *v1_hdr = - container_of(hdr, struct smc_firmware_header_v1_0, header); - const struct smc_firmware_header_v2_0 *v2_hdr = - container_of(v1_hdr, struct smc_firmware_header_v2_0, v1_0); + switch (version_minor) { + case 0: + v2_0_hdr = container_of(hdr, struct smc_firmware_header_v2_0, v1_0.header); + DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_offset_bytes)); + DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_size_bytes)); + break; + case 1: + v2_1_hdr = container_of(hdr, struct smc_firmware_header_v2_1, v1_0.header); + DRM_DEBUG("pptable_count: %u\n", le32_to_cpu(v2_1_hdr->pptable_count)); + DRM_DEBUG("pptable_entry_offset: %u\n", le32_to_cpu(v2_1_hdr->pptable_entry_offset)); + break; + default: + break; + } - DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes)); - DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes)); } else { DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor); } @@ -248,36 +257,60 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr) container_of(hdr, struct psp_firmware_header_v1_0, header); DRM_DEBUG("ucode_feature_version: %u\n", - le32_to_cpu(psp_hdr->ucode_feature_version)); + le32_to_cpu(psp_hdr->sos.fw_version)); DRM_DEBUG("sos_offset_bytes: %u\n", - le32_to_cpu(psp_hdr->sos_offset_bytes)); + le32_to_cpu(psp_hdr->sos.offset_bytes)); 
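/*
 * Editor's note, not part of the patch: each psp_firmware_header_v1_x
 * embeds its predecessor as the first member, so every minor revision
 * is reachable from the common header with a single container_of().
 * Sketch of the access pattern used throughout this function:
 *
 *	const struct psp_firmware_header_v1_1 *h =
 *		container_of(hdr, struct psp_firmware_header_v1_1,
 *			     v1_0.header);
 *
 *	DRM_DEBUG("toc_size_bytes: %u\n", le32_to_cpu(h->toc.size_bytes));
 */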
DRM_DEBUG("sos_size_bytes: %u\n", - le32_to_cpu(psp_hdr->sos_size_bytes)); + le32_to_cpu(psp_hdr->sos.size_bytes)); if (version_minor == 1) { const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0); DRM_DEBUG("toc_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_1->toc_header_version)); + le32_to_cpu(psp_hdr_v1_1->toc.fw_version)); DRM_DEBUG("toc_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes)); + le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes)); DRM_DEBUG("toc_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->toc_size_bytes)); + le32_to_cpu(psp_hdr_v1_1->toc.size_bytes)); DRM_DEBUG("kdb_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_header_version)); + le32_to_cpu(psp_hdr_v1_1->kdb.fw_version)); DRM_DEBUG("kdb_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes)); + le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes)); DRM_DEBUG("kdb_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes)); + le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes)); } if (version_minor == 2) { const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 = container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0); DRM_DEBUG("kdb_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_header_version)); + le32_to_cpu(psp_hdr_v1_2->kdb.fw_version)); + DRM_DEBUG("kdb_offset_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes)); + DRM_DEBUG("kdb_size_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes)); + } + if (version_minor == 3) { + const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = + container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0); + const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 = + container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1); + DRM_DEBUG("toc_header_version: %u\n", + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version)); + DRM_DEBUG("toc_offset_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes)); + DRM_DEBUG("toc_size_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes)); + DRM_DEBUG("kdb_header_version: %u\n", + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version)); DRM_DEBUG("kdb_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes)); DRM_DEBUG("kdb_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes)); + DRM_DEBUG("spl_header_version: %u\n", + le32_to_cpu(psp_hdr_v1_3->spl.fw_version)); + DRM_DEBUG("spl_offset_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes)); + DRM_DEBUG("spl_size_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_3->spl.size_bytes)); } } else { DRM_ERROR("Unknown PSP ucode version: %u.%u\n", @@ -365,16 +398,107 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + case CHIP_VANGOGH: + case CHIP_DIMGREY_CAVEFISH: + case CHIP_ALDEBARAN: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; - + case CHIP_CYAN_SKILLFISH: + if (!(load_type && + adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)) + return AMDGPU_FW_LOAD_DIRECT; + else + return AMDGPU_FW_LOAD_PSP; default: - DRM_ERROR("Unknown firmware load type\n"); + if (!load_type) + return AMDGPU_FW_LOAD_DIRECT; + else + return AMDGPU_FW_LOAD_PSP; } +} - return AMDGPU_FW_LOAD_DIRECT; +const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) +{ + switch (ucode_id) { + case 
AMDGPU_UCODE_ID_SDMA0: + return "SDMA0"; + case AMDGPU_UCODE_ID_SDMA1: + return "SDMA1"; + case AMDGPU_UCODE_ID_SDMA2: + return "SDMA2"; + case AMDGPU_UCODE_ID_SDMA3: + return "SDMA3"; + case AMDGPU_UCODE_ID_SDMA4: + return "SDMA4"; + case AMDGPU_UCODE_ID_SDMA5: + return "SDMA5"; + case AMDGPU_UCODE_ID_SDMA6: + return "SDMA6"; + case AMDGPU_UCODE_ID_SDMA7: + return "SDMA7"; + case AMDGPU_UCODE_ID_CP_CE: + return "CP_CE"; + case AMDGPU_UCODE_ID_CP_PFP: + return "CP_PFP"; + case AMDGPU_UCODE_ID_CP_ME: + return "CP_ME"; + case AMDGPU_UCODE_ID_CP_MEC1: + return "CP_MEC1"; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + return "CP_MEC1_JT"; + case AMDGPU_UCODE_ID_CP_MEC2: + return "CP_MEC2"; + case AMDGPU_UCODE_ID_CP_MEC2_JT: + return "CP_MEC2_JT"; + case AMDGPU_UCODE_ID_CP_MES: + return "CP_MES"; + case AMDGPU_UCODE_ID_CP_MES_DATA: + return "CP_MES_DATA"; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + return "RLC_RESTORE_LIST_CNTL"; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + return "RLC_RESTORE_LIST_GPM_MEM"; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + return "RLC_RESTORE_LIST_SRM_MEM"; + case AMDGPU_UCODE_ID_RLC_IRAM: + return "RLC_IRAM"; + case AMDGPU_UCODE_ID_RLC_DRAM: + return "RLC_DRAM"; + case AMDGPU_UCODE_ID_RLC_G: + return "RLC_G"; + case AMDGPU_UCODE_ID_STORAGE: + return "STORAGE"; + case AMDGPU_UCODE_ID_SMC: + return "SMC"; + case AMDGPU_UCODE_ID_UVD: + return "UVD"; + case AMDGPU_UCODE_ID_UVD1: + return "UVD1"; + case AMDGPU_UCODE_ID_VCE: + return "VCE"; + case AMDGPU_UCODE_ID_VCN: + return "VCN"; + case AMDGPU_UCODE_ID_VCN1: + return "VCN1"; + case AMDGPU_UCODE_ID_DMCU_ERAM: + return "DMCU_ERAM"; + case AMDGPU_UCODE_ID_DMCU_INTV: + return "DMCU_INTV"; + case AMDGPU_UCODE_ID_VCN0_RAM: + return "VCN0_RAM"; + case AMDGPU_UCODE_ID_VCN1_RAM: + return "VCN1_RAM"; + case AMDGPU_UCODE_ID_DMCUB: + return "DMCUB"; + default: + return "UNKNOWN UCODE"; + } } #define FW_VERSION_ATTR(name, mode, field) \ @@ -383,9 +507,9 @@ static ssize_t show_##name(struct device *dev, \ char *buf) \ { \ struct drm_device *ddev = dev_get_drvdata(dev); \ - struct amdgpu_device *adev = ddev->dev_private; \ + struct amdgpu_device *adev = drm_to_adev(ddev); \ \ - return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \ + return sysfs_emit(buf, "0x%08x\n", adev->field); \ } \ static DEVICE_ATTR(name, mode, show_##name, NULL) @@ -401,10 +525,10 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version); FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version); FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); -FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version); -FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version); -FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version); -FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version); +FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version); +FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version); +FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version); +FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version); FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version); FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); @@ -447,6 +571,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct common_firmware_header *header = NULL; const struct 
gfx_firmware_header_v1_0 *cp_hdr = NULL; const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; + const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; + const struct mes_firmware_header_v1_0 *mes_hdr = NULL; + u8 *ucode_addr; if (NULL == ucode->fw) return 0; @@ -460,66 +587,86 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, header = (const struct common_firmware_header *)ucode->fw->data; cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data; - - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || - (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - - le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) { - ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(cp_hdr->jt_offset) * 4), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; + mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_CP_MEC1: + case AMDGPU_UCODE_ID_CP_MEC2: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + case AMDGPU_UCODE_ID_CP_MEC2_JT: + ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(cp_hdr->jt_offset) * 4; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_cntl; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_gpm; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_srm; + break; + case AMDGPU_UCODE_ID_RLC_IRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes; + ucode_addr = adev->gfx.rlc.rlc_iram_ucode; + break; + 
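/*
 * Editor's note, not part of the patch: under AMDGPU_FW_LOAD_PSP every
 * case in this switch only derives the (ucode->ucode_size, ucode_addr)
 * pair; the single memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size)
 * at the end of the function then replaces the per-branch memcpy()
 * calls removed below.
 */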
case AMDGPU_UCODE_ID_RLC_DRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes; + ucode_addr = adev->gfx.rlc.rlc_dram_ucode; + break; + case AMDGPU_UCODE_ID_CP_MES: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MES_DATA: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_ERAM: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) { - ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(dmcu_hdr->intv_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm, - ucode->ucode_size); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_INTV: + ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(dmcu_hdr->intv_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCUB: + ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + default: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + } + } else { + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); } + memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size); + return 0; } @@ -528,8 +675,8 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, { const struct gfx_firmware_header_v1_0 *header = NULL; const struct common_firmware_header *comm_hdr = NULL; - uint8_t* src_addr = NULL; - uint8_t* dst_addr = NULL; + uint8_t *src_addr = NULL; + uint8_t *dst_addr = NULL; if (NULL == ucode->fw) return 0; @@ -580,7 +727,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) struct amdgpu_firmware_info *ucode = NULL; /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */ - if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend)) return 0; /* * if SMU loaded firmware, it needn't add SMC, UVD, and VCE diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 914acecda5cf..7c2538db3cd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -71,49 +71,95 @@ struct smc_firmware_header_v2_1 { uint32_t pptable_entry_offset; }; +struct psp_fw_legacy_bin_desc { + uint32_t fw_version; + uint32_t offset_bytes; + uint32_t size_bytes; +}; + /* version_major=1, version_minor=0 */ struct psp_firmware_header_v1_0 { struct common_firmware_header header; - uint32_t ucode_feature_version; - uint32_t sos_offset_bytes; - uint32_t sos_size_bytes; + struct psp_fw_legacy_bin_desc sos; }; /* version_major=1, version_minor=1 */ struct psp_firmware_header_v1_1 { struct psp_firmware_header_v1_0 v1_0; - uint32_t toc_header_version; - uint32_t toc_offset_bytes; - uint32_t toc_size_bytes; - uint32_t kdb_header_version; - uint32_t kdb_offset_bytes; - uint32_t kdb_size_bytes; + struct psp_fw_legacy_bin_desc toc; + struct psp_fw_legacy_bin_desc kdb; }; /* version_major=1, version_minor=2 */ struct psp_firmware_header_v1_2 { struct psp_firmware_header_v1_0 v1_0; - uint32_t reserve[3]; - uint32_t kdb_header_version; - uint32_t kdb_offset_bytes; - uint32_t kdb_size_bytes; + struct psp_fw_legacy_bin_desc res; + struct psp_fw_legacy_bin_desc kdb; +}; + +/* version_major=1, version_minor=3 */ +struct psp_firmware_header_v1_3 { + struct psp_firmware_header_v1_1 v1_1; + struct psp_fw_legacy_bin_desc spl; + struct psp_fw_legacy_bin_desc rl; + struct psp_fw_legacy_bin_desc sys_drv_aux; + struct psp_fw_legacy_bin_desc sos_aux; +}; + +struct psp_fw_bin_desc { + uint32_t fw_type; + uint32_t fw_version; + uint32_t offset_bytes; + uint32_t size_bytes; +}; + +enum psp_fw_type { + PSP_FW_TYPE_UNKOWN, + PSP_FW_TYPE_PSP_SOS, + PSP_FW_TYPE_PSP_SYS_DRV, + PSP_FW_TYPE_PSP_KDB, + PSP_FW_TYPE_PSP_TOC, + PSP_FW_TYPE_PSP_SPL, + PSP_FW_TYPE_PSP_RL, + PSP_FW_TYPE_PSP_SOC_DRV, + PSP_FW_TYPE_PSP_INTF_DRV, + PSP_FW_TYPE_PSP_DBG_DRV, +}; + +/* version_major=2, version_minor=0 */ +struct psp_firmware_header_v2_0 { + struct common_firmware_header header; + uint32_t psp_fw_bin_count; + struct psp_fw_bin_desc psp_fw_bin[]; }; /* version_major=1, version_minor=0 */ struct ta_firmware_header_v1_0 { struct common_firmware_header header; - uint32_t ta_xgmi_ucode_version; - uint32_t ta_xgmi_offset_bytes; - uint32_t ta_xgmi_size_bytes; - uint32_t ta_ras_ucode_version; - uint32_t ta_ras_offset_bytes; - uint32_t ta_ras_size_bytes; - uint32_t ta_hdcp_ucode_version; - uint32_t ta_hdcp_offset_bytes; - uint32_t ta_hdcp_size_bytes; - uint32_t ta_dtm_ucode_version; - uint32_t ta_dtm_offset_bytes; - uint32_t ta_dtm_size_bytes; + struct psp_fw_legacy_bin_desc xgmi; + struct psp_fw_legacy_bin_desc ras; + struct psp_fw_legacy_bin_desc hdcp; + struct psp_fw_legacy_bin_desc dtm; + struct psp_fw_legacy_bin_desc securedisplay; +}; + +enum ta_fw_type { + TA_FW_TYPE_UNKOWN, + TA_FW_TYPE_PSP_ASD, + TA_FW_TYPE_PSP_XGMI, + TA_FW_TYPE_PSP_RAS, + TA_FW_TYPE_PSP_HDCP, + TA_FW_TYPE_PSP_DTM, + TA_FW_TYPE_PSP_RAP, + TA_FW_TYPE_PSP_SECUREDISPLAY, + TA_FW_TYPE_MAX_INDEX, +}; + +/* version_major=2, version_minor=0 */ +struct ta_firmware_header_v2_0 { + struct common_firmware_header header; + uint32_t ta_fw_bin_count; + struct psp_fw_bin_desc ta_fw_bin[]; }; /* version_major=1, version_minor=0 */ @@ -190,6 +236,15 @@ struct rlc_firmware_header_v2_1 { uint32_t save_restore_list_srm_offset_bytes; }; +/* version_major=2, version_minor=2 */ +struct rlc_firmware_header_v2_2 { + struct rlc_firmware_header_v2_1 
v2_1; + uint32_t rlc_iram_ucode_size_bytes; + uint32_t rlc_iram_ucode_offset_bytes; + uint32_t rlc_dram_ucode_size_bytes; + uint32_t rlc_dram_ucode_offset_bytes; +}; + /* version_major=1, version_minor=0 */ struct sdma_firmware_header_v1_0 { struct common_firmware_header header; @@ -251,6 +306,13 @@ struct dmcu_firmware_header_v1_0 { uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */ }; +/* version_major=1, version_minor=0 */ +struct dmcub_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t inst_const_bytes; /* size of instruction region, in bytes */ + uint32_t bss_data_bytes; /* size of bss/data region, in bytes */ +}; + /* header is fixed size */ union amdgpu_firmware_header { struct common_firmware_header common; @@ -259,7 +321,10 @@ union amdgpu_firmware_header { struct smc_firmware_header_v2_0 smc_v2_0; struct psp_firmware_header_v1_0 psp; struct psp_firmware_header_v1_1 psp_v1_1; + struct psp_firmware_header_v1_3 psp_v1_3; + struct psp_firmware_header_v2_0 psp_v2_0; struct ta_firmware_header_v1_0 ta; + struct ta_firmware_header_v2_0 ta_v2_0; struct gfx_firmware_header_v1_0 gfx; struct rlc_firmware_header_v1_0 rlc; struct rlc_firmware_header_v2_0 rlc_v2_0; @@ -268,9 +333,12 @@ union amdgpu_firmware_header { struct sdma_firmware_header_v1_1 sdma_v1_1; struct gpu_info_firmware_header_v1_0 gpu_info; struct dmcu_firmware_header_v1_0 dmcu; + struct dmcub_firmware_header_v1_0 dmcub; uint8_t raw[0x100]; }; +#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) + /* * fw loading support */ @@ -295,6 +363,8 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, + AMDGPU_UCODE_ID_RLC_IRAM, + AMDGPU_UCODE_ID_RLC_DRAM, AMDGPU_UCODE_ID_RLC_G, AMDGPU_UCODE_ID_STORAGE, AMDGPU_UCODE_ID_SMC, @@ -307,6 +377,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_DMCU_INTV, AMDGPU_UCODE_ID_VCN0_RAM, AMDGPU_UCODE_ID_VCN1_RAM, + AMDGPU_UCODE_ID_DMCUB, AMDGPU_UCODE_ID_MAXIMUM, }; @@ -389,4 +460,6 @@ void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev); enum amdgpu_firmware_load_type amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type); +const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index d4fb9cf27e21..a90029ee9733 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_umc_process_ras_data_cb, @@ -42,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->umc.ras_if->sub_block_index = 0; - strcpy(adev->umc.ras_if->name, "umc"); } ih_info.head = fs_info.head = *adev->umc.ras_if; @@ -61,8 +59,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) } /* ras init of specific umc version */ - if (adev->umc.funcs && adev->umc.funcs->err_cnt_init) - adev->umc.funcs->err_cnt_init(adev); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->err_cnt_init) + adev->umc.ras_funcs->err_cnt_init(adev); return 0; @@ -94,45 +93,50 @@ int amdgpu_umc_process_ras_data_cb(struct 
amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - /* When “Full RAS” is enabled, the per-IP interrupt sources should - * be disabled and the driver should only look for the aggregated - * interrupt via sync flood - */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return AMDGPU_RAS_SUCCESS; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc.funcs && - adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, ras_error_status); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); - if (adev->umc.funcs && - adev->umc.funcs->query_ras_error_address && + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address && adev->umc.max_ras_err_cnt_per_query) { err_data->err_addr = kcalloc(adev->umc.max_ras_err_cnt_per_query, sizeof(struct eeprom_table_record), GFP_KERNEL); + /* still call query_ras_error_address to clear error status * even NOMEM error is encountered */ if(!err_data->err_addr) - DRM_WARN("Failed to alloc memory for umc error address record!\n"); + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); /* umc query_ras_error_address is also responsible for clearing * error status */ - adev->umc.funcs->query_ras_error_address(adev, ras_error_status); + adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); } /* only uncorrectable error needs gpu reset */ if (err_data->ue_count) { - if (err_data->err_addr_cnt && - amdgpu_ras_add_bad_pages(adev, err_data->err_addr, - err_data->err_addr_cnt)) - DRM_WARN("Failed to add ras bad page!\n"); + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in UMC block\n", + err_data->ue_count); + + if ((amdgpu_bad_page_threshold != 0) && + err_data->err_addr_cnt) { + amdgpu_ras_add_bad_pages(adev, err_data->err_addr, + err_data->err_addr_cnt); + amdgpu_ras_save_bad_pages(adev); + + if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) + adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); + } - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_ras_reset_gpu(adev); } kfree(err_data->err_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 3283032a78e5..1f5fe2315236 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -21,48 +21,37 @@ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ -/* implement 64 bits REG operations via 32 bits interface */ -#define RREG64_UMC(reg) (RREG32(reg) | \ - ((uint64_t)RREG32((reg) + 1) << 32)) -#define WREG64_UMC(reg, v) \ - do { \ - WREG32((reg), lower_32_bits(v)); \ - WREG32((reg) + 1, upper_32_bits(v)); \ - } while (0) - /* - * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data, - * uint32_t umc_reg_offset, uint32_t channel_index) + * (addr / 256) * 4096, the higher 26 bits in ErrorAddr + * is the index of 4KB block + */ +#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4) +/* + * (addr / 256) * 8192, the higher 26 bits in ErrorAddr + * is the index of 8KB block */ -#define amdgpu_umc_for_each_channel(func) \ - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \ - uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \ - for (umc_inst = 0; umc_inst < 
adev->umc.umc_inst_num; umc_inst++) { \ - /* enable the index mode to query eror count per channel */ \ - adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \ - for (channel_inst = 0; \ - channel_inst < adev->umc.channel_inst_num; \ - channel_inst++) { \ - /* calc the register offset according to channel instance */ \ - umc_reg_offset = adev->umc.channel_offs * channel_inst; \ - /* get channel index of interleaved memory */ \ - channel_index = adev->umc.channel_idx_tbl[ \ - umc_inst * adev->umc.channel_inst_num + channel_inst]; \ - (func)(adev, err_data, umc_reg_offset, channel_index); \ - } \ - } \ - adev->umc.funcs->disable_umc_index_mode(adev); +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* channel index is the index of 256B block */ +#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) +/* offset in 256B block */ +#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) -struct amdgpu_umc_funcs { +#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++) +#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) +#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) + +struct amdgpu_umc_ras_funcs { void (*err_cnt_init)(struct amdgpu_device *adev); int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); + void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); - void (*enable_umc_index_mode)(struct amdgpu_device *adev, - uint32_t umc_instance); - void (*disable_umc_index_mode)(struct amdgpu_device *adev); + bool (*query_ras_poison_mode)(struct amdgpu_device *adev); +}; + +struct amdgpu_umc_funcs { void (*init_registers)(struct amdgpu_device *adev); }; @@ -80,6 +69,7 @@ struct amdgpu_umc { struct ras_common_if *ras_if; const struct amdgpu_umc_funcs *funcs; + const struct amdgpu_umc_ras_funcs *ras_funcs; }; int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h new file mode 100644 index 000000000000..919d9d401750 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/ioctl.h> + +/* + * MMIO debugfs IOCTL structure + */ +struct amdgpu_debugfs_regs2_iocdata { + __u32 use_srbm, use_grbm, pg_lock; + struct { + __u32 se, sh, instance; + } grbm; + struct { + __u32 me, pipe, queue, vmid; + } srbm; +}; + +/* + * MMIO debugfs state data (per file* handle) + */ +struct amdgpu_debugfs_regs2_data { + struct amdgpu_device *adev; + struct mutex lock; + struct amdgpu_debugfs_regs2_iocdata id; +}; + +enum AMDGPU_DEBUGFS_REGS2_CMDS { + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, +}; + +#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e324bfe6c58f..6f8de11a17f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <drm/drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -54,6 +55,12 @@ #define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) /* Firmware Names */ +#ifdef CONFIG_DRM_AMDGPU_SI +#define FIRMWARE_TAHITI "amdgpu/tahiti_uvd.bin" +#define FIRMWARE_VERDE "amdgpu/verde_uvd.bin" +#define FIRMWARE_PITCAIRN "amdgpu/pitcairn_uvd.bin" +#define FIRMWARE_OLAND "amdgpu/oland_uvd.bin" +#endif #ifdef CONFIG_DRM_AMDGPU_CIK #define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin" #define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin" @@ -81,7 +88,7 @@ #define UVD_NO_OP 0x03ff #define UVD_BASE_SI 0x3800 -/** +/* * amdgpu_uvd_cs_ctx - Command submission parser context * * Used for emulating virtual memory support on UVD 4.2. @@ -100,6 +107,12 @@ struct amdgpu_uvd_cs_ctx { unsigned *buf_sizes; }; +#ifdef CONFIG_DRM_AMDGPU_SI +MODULE_FIRMWARE(FIRMWARE_TAHITI); +MODULE_FIRMWARE(FIRMWARE_VERDE); +MODULE_FIRMWARE(FIRMWARE_PITCAIRN); +MODULE_FIRMWARE(FIRMWARE_OLAND); +#endif #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); MODULE_FIRMWARE(FIRMWARE_KABINI); @@ -121,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); +static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo); + +static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev, + uint32_t size, + struct amdgpu_bo **bo_ptr) +{ + struct ttm_operation_ctx ctx = { true, false }; + struct amdgpu_bo *bo = NULL; + void *addr; + int r; + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &bo, NULL, &addr); + if (r) + return r; + + if (adev->uvd.address_64_bit) + goto succ; + + amdgpu_bo_kunmap(bo); + amdgpu_bo_unpin(bo); + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + amdgpu_uvd_force_into_uvd_segment(bo); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + goto err; + r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM); + if (r) + goto err_pin; + r = amdgpu_bo_kmap(bo, &addr); + if (r) + goto err_kmap; +succ: + amdgpu_bo_unreserve(bo); + *bo_ptr = bo; + return 0; +err_kmap: + amdgpu_bo_unpin(bo); +err_pin: +err: + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + return r; +} int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { @@ -133,6 +191,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + fw_name = FIRMWARE_TAHITI; + break; + case CHIP_VERDE: + fw_name = FIRMWARE_VERDE; + break; + 
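/*
 * Editor's note, not part of the patch: like the existing CIK/VI
 * entries, each new SI firmware path is declared with MODULE_FIRMWARE()
 * above so userspace tooling can pick it up, and the name selected here
 * is later consumed by the usual pair, roughly:
 *
 *	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
 *	...
 *	release_firmware(adev->uvd.fw);
 */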
case CHIP_PITCAIRN: + fw_name = FIRMWARE_PITCAIRN; + break; + case CHIP_OLAND: + fw_name = FIRMWARE_OLAND; + break; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: fw_name = FIRMWARE_BONAIRE; @@ -214,7 +286,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", + DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n", version_major, version_minor, family_id); /* @@ -233,7 +305,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if ((adev->asic_type == CHIP_POLARIS10 || adev->asic_type == CHIP_POLARIS11) && (adev->uvd.fw_version < FW_1_66_16)) - DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n", + DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n", version_major, version_minor); } else { unsigned int enc_major, enc_minor, dec_minor; @@ -241,7 +313,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f; enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3; - DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n", + DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n", enc_major, enc_minor, dec_minor, family_id); adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; @@ -275,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); + if (r) + return r; + switch (adev->asic_type) { case CHIP_TONGA: adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; @@ -297,9 +373,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { + void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); int i, j; - cancel_delayed_work_sync(&adev->uvd.idle_work); drm_sched_entity_destroy(&adev->uvd.entity); for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { @@ -316,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); } + amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); release_firmware(adev->uvd.fw); return 0; @@ -330,12 +407,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_entity_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; int r; ring = &adev->uvd.inst[0].ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); if (r) { DRM_ERROR("Failed setting up UVD kernel entity.\n"); return r; @@ -348,7 +426,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i, j; + int i, j, idx; + bool in_ras_intr = amdgpu_ras_intr_triggered(); cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -375,14 +454,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - /* re-write 0 since err_event_athub will corrupt VCPU buffer */ - if (amdgpu_ras_intr_triggered()) { - DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); - 
memset(adev->uvd.inst[j].saved_bo, 0, size); - } else { - memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + /* re-write 0 since err_event_athub will corrupt VCPU buffer */ + if (in_ras_intr) + memset(adev->uvd.inst[j].saved_bo, 0, size); + else + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + + drm_dev_exit(idx); } } + + if (in_ras_intr) + DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + return 0; } @@ -390,7 +475,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) @@ -402,7 +487,10 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.inst[i].cpu_addr; if (adev->uvd.inst[i].saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + drm_dev_exit(idx); + } kvfree(adev->uvd.inst[i].saved_bo); adev->uvd.inst[i].saved_bo = NULL; } else { @@ -412,8 +500,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + drm_dev_exit(idx); + } size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } @@ -515,8 +606,9 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) /** * amdgpu_uvd_cs_msg_decode - handle UVD decode message * + * @adev: amdgpu_device pointer * @msg: pointer to message structure - * @buf_sizes: returned buffer sizes + * @buf_sizes: placeholder to put the different buffer lengths * * Peek into the decode message and calculate the necessary buffer sizes. */ @@ -798,9 +890,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, default: DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; } - BUG(); + return -EINVAL; } @@ -975,6 +1066,7 @@ static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx, * amdgpu_uvd_ring_parse_cs - UVD command submission parser * * @parser: Command submission parser context + * @ib_idx: Which indirect buffer to use * * Parse the command stream, patch in addresses as necessary. */ @@ -1039,22 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, unsigned offset_idx = 0; unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; - amdgpu_bo_kunmap(bo); - amdgpu_bo_unpin(bo); - - if (!ring->adev->uvd.address_64_bit) { - struct ttm_operation_ctx ctx = { true, false }; - - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); - amdgpu_uvd_force_into_uvd_segment(bo); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) - goto err; - } - - r = amdgpu_job_alloc_with_ib(adev, 64, &job); + r = amdgpu_job_alloc_with_ib(adev, 64, direct ? 
AMDGPU_IB_POOL_DIRECT : + AMDGPU_IB_POOL_DELAYED, &job); if (r) - goto err; + return r; if (adev->asic_type >= CHIP_VEGA10) { offset_idx = 1 + ring->me; @@ -1082,9 +1162,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, - msecs_to_jiffies(10)); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; if (r < 0) @@ -1095,7 +1174,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err_free; } else { r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); if (r) goto err_free; @@ -1105,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err_free; } + amdgpu_bo_reserve(bo, true); amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); if (fence) *fence = dma_fence_get(f); @@ -1117,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, err_free: amdgpu_job_free(job); - -err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -1131,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = adev->uvd.ib_bo; uint32_t *msg; - int r, i; - - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, (void **)&msg); - if (r) - return r; + int i; + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD create msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000000); @@ -1157,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, msg[i] = cpu_to_le32(0x0); return amdgpu_uvd_send_msg(ring, bo, true, fence); + } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, @@ -1167,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, uint32_t *msg; int r, i; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, (void **)&msg); - if (r) - return r; + if (direct) { + bo = adev->uvd.ib_bo; + } else { + r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo); + if (r) + return r; + } + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD destroy msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000002); @@ -1181,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = 4; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_uvd_send_msg(ring, bo, direct, fence); + r = amdgpu_uvd_send_msg(ring, bo, direct, fence); + + if (!direct) + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + + return r; } static void amdgpu_uvd_idle_work_handler(struct work_struct *work) @@ -1247,6 +1327,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) * amdgpu_uvd_ring_test_ib - test ib execution * * @ring: amdgpu_ring pointer + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test if we can successfully execute an IB */ @@ -1255,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *fence; long r; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + r = amdgpu_uvd_get_create_msg(ring, 1, &fence); if (r) goto error; + r = dma_fence_wait_timeout(fence, false, 
timeout); + dma_fence_put(fence); + if (r == 0) + r = -ETIMEDOUT; + if (r < 0) + goto error; + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 5eb63288d157..76ac9699885d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -67,6 +67,8 @@ struct amdgpu_uvd { unsigned harvest_config; /* store image width to adjust nb memory state */ unsigned decode_image_width; + uint32_t keyselect; + struct amdgpu_bo *ib_bo; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 46b590af2fd2..688bef1649b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <drm/drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -81,15 +82,15 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence); static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); /** - * amdgpu_vce_init - allocate memory, load vce firmware + * amdgpu_vce_sw_init - allocate memory, load vce firmware * * @adev: amdgpu_device pointer + * @size: size for the new BO * * First step to get VCE online, allocate memory and load the firmware */ @@ -178,7 +179,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) version_major = (ucode_version >> 20) & 0xfff; version_minor = (ucode_version >> 8) & 0xfff; binary_id = ucode_version & 0xff; - DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n", + DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n", version_major, version_minor, binary_id); adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | (binary_id << 8)); @@ -203,7 +204,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) } /** - * amdgpu_vce_fini - free memory + * amdgpu_vce_sw_fini - free memory * * @adev: amdgpu_device pointer * @@ -216,7 +217,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; - cancel_delayed_work_sync(&adev->vce.idle_work); drm_sched_entity_destroy(&adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, @@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) int amdgpu_vce_entity_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; int r; ring = &adev->vce.ring[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL); + sched = &ring->sched; + r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); return r; @@ -291,7 +292,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) void *cpu_addr; const struct common_firmware_header *hdr; unsigned offset; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; @@ -311,8 +312,12 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = 
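For reference, the VCE ucode_version word decoded above packs three fields: 12 bits of major version, 12 bits of minor version and 8 bits of binary ID. A self-contained sketch of the unpacking (struct and function names are illustrative; the field layout is taken from the code above):

#include <linux/types.h>

struct ex_vce_fw_version {
	u16 major;	/* bits 31:20 */
	u16 minor;	/* bits 19:8  */
	u8  binary_id;	/* bits 7:0   */
};

static struct ex_vce_fw_version ex_vce_decode(u32 ucode_version)
{
	return (struct ex_vce_fw_version) {
		.major     = (ucode_version >> 20) & 0xfff,
		.minor     = (ucode_version >> 8) & 0xfff,
		.binary_id = ucode_version & 0xff,
	};
}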
le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(cpu_addr, adev->vce.fw->data + offset, - adev->vce.fw->size - offset); + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); + drm_dev_exit(idx); + } amdgpu_bo_kunmap(adev->vce.vcpu_bo); @@ -427,31 +432,40 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) /** * amdgpu_vce_get_create_msg - generate a VCE create msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: VCE session handle to use + * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Open up a stream for HW test */ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; + struct amdgpu_ib ib_msg; struct dma_fence *f = NULL; uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; - ib = &job->ibs[0]; + memset(&ib_msg, 0, sizeof(ib_msg)); + /* only one gpu page is needed, alloc +1 page to make addr aligned. */ + r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + &ib_msg); + if (r) + goto err; - addr = amdgpu_bo_gpu_offset(bo); + ib = &job->ibs[0]; + /* let addr point to page boundary */ + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr); /* stitch together an VCE create msg */ ib->length_dw = 0; @@ -491,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_job_submit_direct(job, ring, &f); + amdgpu_ib_free(ring->adev, &ib_msg, f); if (r) goto err; @@ -507,9 +522,9 @@ err: /** * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: VCE session handle to use + * @direct: direct or delayed pool * @fence: optional fence to return * * Close up a stream for HW test or if userspace failed to do so @@ -523,7 +538,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence *f = NULL; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + direct ? 
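The drm_dev_enter()/drm_dev_exit() pairs introduced above (and repeated for VCN later in this patch) bracket device-memory copies so they are skipped once the device has been unplugged. The shape of the pattern, sketched with placeholder parameters:

#include <drm/drm_drv.h>
#include <linux/io.h>

/* Copy firmware into device memory only while the DRM device is alive. */
static void ex_copy_fw(struct drm_device *ddev, void __iomem *dst,
		       const void *src, size_t size)
{
	int idx;

	if (!drm_dev_enter(ddev, &idx))
		return;		/* device gone: nothing safe to touch */

	memcpy_toio(dst, src, size);
	drm_dev_exit(idx);
}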
AMDGPU_IB_POOL_DIRECT : + AMDGPU_IB_POOL_DELAYED, &job); if (r) return r; @@ -569,9 +586,10 @@ err: } /** - * amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary + * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary * * @p: parser context + * @ib_idx: indirect buffer to use * @lo: address of lower dword * @hi: address of higher dword * @size: minimum size @@ -621,9 +639,11 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx, * amdgpu_vce_cs_reloc - command submission relocation * * @p: parser context + * @ib_idx: indirect buffer to use * @lo: address of lower dword * @hi: address of higher dword * @size: minimum size + * @index: bs/fb index * * Patch relocation inside command stream with real buffer address */ @@ -651,7 +671,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, if ((addr + (uint64_t)size) > (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", + DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", addr, lo, hi); return -EINVAL; } @@ -707,10 +727,10 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, } /** - * amdgpu_vce_cs_parse - parse and validate the command stream + * amdgpu_vce_ring_parse_cs - parse and validate the command stream * * @p: parser context - * + * @ib_idx: indirect buffer to use */ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) { @@ -943,10 +963,10 @@ out: } /** - * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode + * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode * * @p: parser context - * + * @ib_idx: indirect buffer to use */ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx) { @@ -1036,7 +1056,9 @@ out: * amdgpu_vce_ring_emit_ib - execute indirect buffer * * @ring: engine to use + * @job: job to retrieve vmid from * @ib: the IB to execute + * @flags: unused * */ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, @@ -1054,7 +1076,9 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, * amdgpu_vce_ring_emit_fence - add a fence command to the ring * * @ring: engine to use - * @fence: the fence + * @addr: address + * @seq: sequence number + * @flags: fence related flags * */ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, @@ -1112,25 +1136,19 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) * amdgpu_vce_ring_test_ib - test if VCE IBs are working * * @ring: the engine to test on + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * */ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ if (ring != &ring->adev->vce.ring[0]) return 0; - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -1146,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } + +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring) +{ + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index d6d83a3ec803..be4a6e773c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring); +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 9d870444d7d6..2658414c503d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -27,39 +27,47 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/pci.h> - -#include <drm/drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" #include "amdgpu_vcn.h" #include "soc15d.h" -#include "soc15_common.h" - -#include "vcn/vcn_1_0_offset.h" -#include "vcn/vcn_1_0_sh_mask.h" - -/* 1 second timeout */ -#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) /* Firmware Names */ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" #define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" -#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" -#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" -#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" -#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" -#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" +#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" +#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin" +#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin" +#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" +#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" +#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" +#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin" +#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin" +#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" +#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin" +#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" +#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" +#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); MODULE_FIRMWARE(FIRMWARE_RAVEN2); MODULE_FIRMWARE(FIRMWARE_ARCTURUS); MODULE_FIRMWARE(FIRMWARE_RENOIR); +MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE); +MODULE_FIRMWARE(FIRMWARE_ALDEBARAN); MODULE_FIRMWARE(FIRMWARE_NAVI10); MODULE_FIRMWARE(FIRMWARE_NAVI14); MODULE_FIRMWARE(FIRMWARE_NAVI12); +MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID); +MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER); +MODULE_FIRMWARE(FIRMWARE_VANGOGH); +MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH); +MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY); +MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -72,39 +80,85 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) int i, r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); - - switch (adev->asic_type) { - case CHIP_RAVEN: - if (adev->rev_id >= 8) + mutex_init(&adev->vcn.vcn_pg_lock); + mutex_init(&adev->vcn.vcn1_jpeg1_workaround); + atomic_set(&adev->vcn.total_submission_cnt, 0); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) + atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); + + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + 
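The firmware selection in this hunk keys off discovered IP versions rather than asic_type enums. IP_VERSION() folds major/minor/revision into a single comparable integer; the exact packing shown below is an assumption for illustration only (a common layout is major<<16 | minor<<8 | revision):

/* Assumed packing; stands in for the driver's IP_VERSION() macro. */
#define EX_IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *ex_pick_vcn_fw(unsigned int uvd_ip_version)
{
	switch (uvd_ip_version) {
	case EX_IP_VERSION(1, 0, 0):
	case EX_IP_VERSION(1, 0, 1):
		return "amdgpu/raven_vcn.bin"; /* refined further by APU flags */
	case EX_IP_VERSION(3, 0, 2):
		return "amdgpu/vangogh_vcn.bin";
	default:
		return NULL;
	}
}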
case IP_VERSION(1, 0, 1): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) fw_name = FIRMWARE_RAVEN2; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) fw_name = FIRMWARE_PICASSO; else fw_name = FIRMWARE_RAVEN; break; - case CHIP_ARCTURUS: + case IP_VERSION(2, 5, 0): fw_name = FIRMWARE_ARCTURUS; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(2, 2, 0): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + fw_name = FIRMWARE_RENOIR; + else + fw_name = FIRMWARE_GREEN_SARDINE; + + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; break; - case CHIP_RENOIR: - fw_name = FIRMWARE_RENOIR; + case IP_VERSION(2, 6, 0): + fw_name = FIRMWARE_ALDEBARAN; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI10: + case IP_VERSION(2, 0, 0): fw_name = FIRMWARE_NAVI10; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI14: - fw_name = FIRMWARE_NAVI14; + case IP_VERSION(2, 0, 2): + if (adev->asic_type == CHIP_NAVI12) + fw_name = FIRMWARE_NAVI12; + else + fw_name = FIRMWARE_NAVI14; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(3, 0, 0): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + fw_name = FIRMWARE_SIENNA_CICHLID; + else + fw_name = FIRMWARE_NAVY_FLOUNDER; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(3, 0, 2): + fw_name = FIRMWARE_VANGOGH; + break; + case IP_VERSION(3, 0, 16): + fw_name = FIRMWARE_DIMGREY_CAVEFISH; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(3, 0, 33): + fw_name = FIRMWARE_BEIGE_GOBY; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI12: - fw_name = FIRMWARE_NAVI12; + case IP_VERSION(3, 1, 1): + fw_name = FIRMWARE_YELLOW_CARP; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; @@ -147,7 +201,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) enc_major = fw_check; dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; - DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n", + DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n", enc_major, enc_minor, dec_ver, vep, fw_rev); } else { unsigned int version_major, version_minor, family_id; @@ -155,13 +209,14 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", + DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n", version_major, version_minor, family_id); } bo_size = AMDGPU_VCN_STACK_SIZE + 
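In the allocation that follows, one page-aligned amdgpu_fw_shared region is appended to each instance's VCPU BO size and then addressed back from the BO's tail. The arithmetic, as an isolated sketch (names and the 4 KiB page size are stand-ins for the driver's AMDGPU_GPU_PAGE_ALIGN):

#define EX_PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)

struct ex_shared_loc {
	void *cpu;
	unsigned long long gpu;
};

/* The shared region lives in the last aligned chunk of the BO. */
static struct ex_shared_loc ex_fw_shared_loc(void *bo_cpu,
					     unsigned long long bo_gpu,
					     unsigned long bo_size,
					     unsigned long shared_size)
{
	unsigned long off = bo_size - EX_PAGE_ALIGN(shared_size);

	return (struct ex_shared_loc) {
		.cpu = (char *)bo_cpu + off,
		.gpu = bo_gpu + off,
	};
}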
AMDGPU_VCN_CONTEXT_SIZE; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (adev->vcn.harvest_config & (1 << i)) @@ -174,15 +229,20 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); return r; } - } - if (adev->vcn.indirect_sram) { - r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo, - &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r); - return r; + adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + + bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); + adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + + bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); + + if (adev->vcn.indirect_sram) { + r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, + &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); + if (r) { + dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r); + return r; + } } } @@ -193,17 +253,15 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { int i, j; - cancel_delayed_work_sync(&adev->vcn.idle_work); - - if (adev->vcn.indirect_sram) { - amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, - &adev->vcn.dpg_sram_gpu_addr, - (void **)&adev->vcn.dpg_sram_cpu_addr); - } - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; + + if (adev->vcn.indirect_sram) { + amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, + &adev->vcn.inst[j].dpg_sram_gpu_addr, + (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); + } kvfree(adev->vcn.inst[j].saved_bo); amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, @@ -214,20 +272,43 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_enc_rings; ++i) amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); - - amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg); } release_firmware(adev->vcn.fw); + mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); + mutex_destroy(&adev->vcn.vcn_pg_lock); return 0; } +bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) +{ + bool ret = false; + + int major; + int minor; + int revision; + + /* if cannot find IP data, then this VCN does not exist */ + if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0) + return true; + + if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + ret = true; + } + + return ret; +} + int amdgpu_vcn_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -244,7 +325,10 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) if (!adev->vcn.inst[i].saved_bo) return -ENOMEM; - memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + drm_dev_exit(idx); + } } return 0; } @@ -253,7 +337,7 @@ int 
amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -265,7 +349,10 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) ptr = adev->vcn.inst[i].cpu_addr; if (adev->vcn.inst[i].saved_bo != NULL) { - memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + drm_dev_exit(idx); + } kvfree(adev->vcn.inst[i].saved_bo); adev->vcn.inst[i].saved_bo = NULL; } else { @@ -275,8 +362,11 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + drm_dev_exit(idx); + } size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } @@ -292,10 +382,12 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) container_of(work, struct amdgpu_device, vcn.idle_work.work); unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; unsigned int i, j; + int r = 0; for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); } @@ -303,31 +395,26 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; - if (fence[j]) + if (fence[j] || + unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) new_state.fw_based = VCN_DPG_STATE__PAUSE; else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg)) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - else - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; - - adev->vcn.pause_dpg_mode(adev, &new_state); + adev->vcn.pause_dpg_mode(adev, j, &new_state); } - fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg); fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); fences += fence[j]; } - if (fences == 0) { - amdgpu_gfx_off_ctrl(adev, true); - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, false); - else - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, - AMD_PG_STATE_GATE); + if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, + false); + if (r) + dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r); } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -336,46 +423,53 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); + int r = 0; - if (set_clocks) { - amdgpu_gfx_off_ctrl(adev, false); - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) - amdgpu_dpm_enable_uvd(adev, true); 
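The begin_use/end_use rework in this hunk replaces the cancel-work heuristic with explicit submission counting: begin_use bumps an atomic counter before any work is queued, end_use drops it and re-arms the idle timer, and the idle worker only gates power once both the counter and the emitted-fence count reach zero. A condensed sketch of that lifecycle (names are illustrative):

#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static atomic_t ex_submissions = ATOMIC_INIT(0);

static void ex_begin_use(struct delayed_work *idle_work)
{
	atomic_inc(&ex_submissions);	/* before any work is queued */
	cancel_delayed_work_sync(idle_work);
	/* ... ungate power, choose DPG pause state ... */
}

static void ex_end_use(struct delayed_work *idle_work)
{
	atomic_dec(&ex_submissions);
	schedule_delayed_work(idle_work, msecs_to_jiffies(1000));
}

static bool ex_may_gate(unsigned int emitted_fences)
{
	return !emitted_fences && !atomic_read(&ex_submissions);
}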
- else - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, - AMD_PG_STATE_UNGATE); + atomic_inc(&adev->vcn.total_submission_cnt); + + if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { + r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, + true); + if (r) + dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r); } + mutex_lock(&adev->vcn.vcn_pg_lock); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; - unsigned int fences = 0; - unsigned int i; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); - } - if (fences) + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) { + atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); new_state.fw_based = VCN_DPG_STATE__PAUSE; - else - new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + } else { + unsigned int fences = 0; + unsigned int i; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg)) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - else - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) - new_state.fw_based = VCN_DPG_STATE__PAUSE; - else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) - new_state.jpeg = VCN_DPG_STATE__PAUSE; + if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + } - adev->vcn.pause_dpg_mode(adev, &new_state); + adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); } + mutex_unlock(&adev->vcn.vcn_pg_lock); } void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) { + if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && + ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); + + atomic_dec(&ring->adev->vcn.total_submission_cnt); + schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -386,6 +480,10 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + /* VCN in SRIOV does not support direct register read/write */ + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) @@ -406,23 +504,54 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) return r; } +int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t rptr; + unsigned int i; + int r; + + if (amdgpu_sriov_vf(adev)) + return 0; + + r = amdgpu_ring_alloc(ring, 16); + if (r) + return r; + + rptr = amdgpu_ring_get_rptr(ring); + + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + if (amdgpu_ring_get_rptr(ring) != rptr) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; - r = 
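The software-ring smoke test added just above writes a single VCN_DEC_SW_CMD_END and polls the read pointer until the engine consumes it; no fence machinery is involved. Sketched in isolation (mirrors the test above, using the driver's amdgpu_ring helpers, with the timeout passed in):

#include <linux/delay.h>
#include <linux/errno.h>

static int ex_sw_ring_smoke_test(struct amdgpu_ring *ring,
				 unsigned int usec_timeout)
{
	uint32_t rptr;
	unsigned int i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);
	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			return 0;	/* engine advanced past our command */
		udelay(1);
	}
	return -ETIMEDOUT;
}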
amdgpu_job_alloc_with_ib(adev, 64, &job); + r = amdgpu_job_alloc_with_ib(adev, 64, + AMDGPU_IB_POOL_DIRECT, &job); if (r) goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -439,9 +568,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -451,27 +578,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); return r; } static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_bo *bo = NULL; uint32_t *msg; int r, i; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000038); msg[2] = cpu_to_le32(0x00000001); @@ -489,23 +615,24 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand for (i = 14; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, fence); + return 0; } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_bo *bo = NULL; uint32_t *msg; int r, i; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000018); msg[2] = cpu_to_le32(0x00000000); @@ -515,19 +642,112 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han for (i = 6; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, fence); + return 0; } int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct dma_fence *fence; + struct dma_fence *fence = NULL; + struct amdgpu_ib ib; + long r; + + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); + if (r) + goto error; + + r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL); + if (r) + goto error; + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); + if (r) + goto error; + + r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence); + if (r) + goto error; + + r = dma_fence_wait_timeout(fence, false, timeout); + if (r == 0) + r = -ETIMEDOUT; + else if (r > 0) + r = 0; + + dma_fence_put(fence); +error: + return r; +} + +static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, + struct amdgpu_ib *ib_msg, + struct dma_fence **fence) +{ + struct amdgpu_vcn_decode_buffer *decode_buffer = NULL; + const unsigned int ib_size_dw = 64; + struct amdgpu_device *adev = ring->adev; + struct dma_fence *f = NULL; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + uint64_t addr = 
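Message buffers move from dedicated BOs into IB-pool allocations above. Because pool memory offers no alignment guarantee, twice the page size is requested and both the CPU pointer and the GPU address are rounded up to the same page boundary inside the allocation. The arithmetic, as a sketch (4 KiB stands in for AMDGPU_GPU_PAGE_SIZE):

#include <linux/types.h>

#define EX_GPU_PAGE 4096UL
#define EX_GPU_PAGE_ALIGN(a) (((a) + EX_GPU_PAGE - 1) & ~(EX_GPU_PAGE - 1))

struct ex_msg_window {
	u32 *cpu;	/* where the host stitches the message */
	u64 gpu;	/* what the engine is pointed at */
};

/* Given an IB of at least two pages, find the aligned window inside it. */
static struct ex_msg_window ex_msg_window(void *ib_ptr, u64 ib_gpu_addr)
{
	return (struct ex_msg_window) {
		.cpu = (u32 *)EX_GPU_PAGE_ALIGN((unsigned long)ib_ptr),
		.gpu = EX_GPU_PAGE_ALIGN(ib_gpu_addr),
	};
}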
AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); + int i, r; + + r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); + if (r) + goto err; + + ib = &job->ibs[0]; + ib->length_dw = 0; + + ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8; + ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER); + decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]); + ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4; + memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer)); + + decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER); + decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32); + decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr); + + for (i = ib->length_dw; i < ib_size_dw; ++i) + ib->ptr[i] = 0x0; + + r = amdgpu_job_submit_direct(job, ring, &f); + if (r) + goto err_free; + + amdgpu_ib_free(adev, ib_msg, f); + + if (fence) + *fence = dma_fence_get(f); + dma_fence_put(f); + + return 0; + +err_free: + amdgpu_job_free(job); +err: + amdgpu_ib_free(adev, ib_msg, f); + return r; +} + +int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct dma_fence *fence = NULL; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); + if (r) + goto error; + + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL); + if (r) + goto error; + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence); if (r) goto error; @@ -549,6 +769,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) return r; @@ -571,7 +794,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -581,12 +804,13 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -624,7 +848,7 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -634,12 +858,13 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -678,21 +903,23 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_ib ib; long r; - r = 
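In the software-ring decode path above, the engine receives the message address as two little-endian dwords inside an amdgpu_vcn_decode_buffer, flagged valid via AMDGPU_VCN_CMD_FLAG_MSG_BUFFER. Filling it reduces to the following sketch (the struct mirrors the one defined in amdgpu_vcn.h later in this patch):

#include <linux/types.h>

struct ex_decode_buffer {
	__le32 valid_buf_flag;
	__le32 msg_buffer_address_hi;
	__le32 msg_buffer_address_lo;
};

static void ex_fill_decode_buffer(struct ex_decode_buffer *db, u64 msg_addr)
{
	db->valid_buf_flag = cpu_to_le32(0x00000001);	/* MSG_BUFFER flag */
	db->msg_buffer_address_hi = cpu_to_le32(msg_addr >> 32);
	db->msg_buffer_address_lo = cpu_to_le32((u32)msg_addr);
}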
amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_DIRECT, + &ib); if (r) return r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence); if (r) goto error; @@ -703,113 +930,49 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; error: + amdgpu_ib_free(adev, &ib, fence); dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); - return r; -} - -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - int r; - - WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); - r = amdgpu_ring_alloc(ring, 3); - if (r) - return r; - - amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0)); - amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_commit(ring); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; return r; } -static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring) { - struct amdgpu_device *adev = ring->adev; - struct amdgpu_job *job; - struct amdgpu_ib *ib; - struct dma_fence *f = NULL; - const unsigned ib_size_dw = 16; - int i, r; - - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); - if (r) - return r; - - ib = &job->ibs[0]; - - ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); - ib->ptr[1] = 0xDEADBEEF; - for (i = 2; i < 16; i += 2) { - ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ib->ptr[i+1] = 0; + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; } - ib->length_dw = 16; - - r = amdgpu_job_submit_direct(job, ring, &f); - if (r) - goto err; - - if (fence) - *fence = dma_fence_get(f); - dma_fence_put(f); - - return 0; - -err: - amdgpu_job_free(job); - return r; } -int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) +void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) { - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - struct dma_fence *fence = NULL; - long r = 0; - - r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); - if (r) - goto error; - - r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - r = -ETIMEDOUT; - goto error; - } else if (r < 0) { - goto error; - } else { - r = 0; - } - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); + int i; + unsigned int idx; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + /* currently only support 2 FW instances */ + if (i >= 2) { + dev_info(adev->dev, "More than 2 VCN FW instances!\n"); + break; + } + idx =
AMDGPU_UCODE_ID_VCN + i; + adev->firmware.ucode[idx].ucode_id = idx; + adev->firmware.ucode[idx].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + } + dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - - dma_fence_put(fence); -error: - return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dface275c81a..bfa27ea94804 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -31,6 +31,7 @@ #define AMDGPU_VCN_MAX_ENC_RINGS 3 #define AMDGPU_MAX_VCN_INSTANCES 2 +#define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) @@ -43,6 +44,17 @@ #define VCN_DEC_CMD_PACKET_START 0x0000000a #define VCN_DEC_CMD_PACKET_END 0x0000000b +#define VCN_DEC_SW_CMD_NO_OP 0x00000000 +#define VCN_DEC_SW_CMD_END 0x00000001 +#define VCN_DEC_SW_CMD_IB 0x00000002 +#define VCN_DEC_SW_CMD_FENCE 0x00000003 +#define VCN_DEC_SW_CMD_TRAP 0x00000004 +#define VCN_DEC_SW_CMD_IB_AUTO 0x00000005 +#define VCN_DEC_SW_CMD_SEMAPHORE 0x00000006 +#define VCN_DEC_SW_CMD_PREEMPT_FENCE 0x00000009 +#define VCN_DEC_SW_CMD_REG_WRITE 0x0000000b +#define VCN_DEC_SW_CMD_REG_WAIT 0x0000000c + #define VCN_ENC_CMD_NO_OP 0x00000000 #define VCN_ENC_CMD_END 0x00000001 #define VCN_ENC_CMD_IB 0x00000002 @@ -52,80 +64,113 @@ #define VCN_ENC_CMD_REG_WAIT 0x0000000c #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 +#define VCN1_VID_SOC_ADDRESS_3_0 0x48200 #define VCN_AON_SOC_ADDRESS_2_0 0x1f800 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_VID_IP_ADDRESS_2_0 0x0 #define VCN_AON_IP_ADDRESS_2_0 0x30000 -#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ - ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ +#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b +#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 +#define mmUVD_REG_XX_MASK 0x026c +#define mmUVD_REG_XX_MASK_BASE_IDX 1 + +/* 1 second timeout */ +#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) + +#define RREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, mask, sram_sel) \ + ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__MASK_EN_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ - RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \ + RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \ +#define WREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, value, mask, sram_sel) \ do { \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ } while (0) -#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \ 
+#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ uint32_t internal_reg_offset, addr; \ - bool video_range, aon_range; \ + bool video_range, video1_range, aon_range, aon1_range; \ \ - addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ addr <<= 2; \ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \ + video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS_3_0)) && \ + ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS_3_0 + 0x2600))))); \ aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) && \ ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600))))); \ + aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS_3_0)) && \ + ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS_3_0 + 0x600))))); \ if (video_range) \ internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) + \ (VCN_VID_IP_ADDRESS_2_0)); \ else if (aon_range) \ internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) + \ (VCN_AON_IP_ADDRESS_2_0)); \ + else if (video1_range) \ + internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS_3_0) + \ + (VCN_VID_IP_ADDRESS_2_0)); \ + else if (aon1_range) \ + internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS_3_0) + \ + (VCN_AON_IP_ADDRESS_2_0)); \ else \ internal_reg_offset = (0xFFFFF & addr); \ \ internal_reg_offset >>= 2; \ }) -#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \ - ({ \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \ - (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \ +#define RREG32_SOC15_DPG_MODE(inst_idx, offset, mask_en) \ + ({ \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ + (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \ - do { \ - if (!indirect) { \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \ - (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - } else { \ - *adev->vcn.dpg_sram_curr_addr++ = offset; \ - *adev->vcn.dpg_sram_curr_addr++ = value; \ - } \ +#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + } else { \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \ + } \ } while (0) +#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6) +#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8) +#define AMDGPU_VCN_SW_RING_FLAG (1 << 9) + +#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001 +#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001 + +enum fw_queue_mode { + FW_QUEUE_RING_RESET = 1, + FW_QUEUE_DPG_HOLD_OFF = 2, +}; + enum engine_status_constants { UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0, + 
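The reworked DPG macros above carry an inst_idx so each VCN instance records into its own SRAM. In indirect mode the register write is not issued at all; it is queued as an (offset, value) pair for later replay, which boils down to this sketch:

#include <linux/types.h>

/* Record one deferred register write into the per-instance DPG SRAM. */
static void ex_dpg_record(u32 **sram_curr, u32 offset, u32 value)
{
	*(*sram_curr)++ = offset;	/* register to program */
	*(*sram_curr)++ = value;	/* value to program it with */
}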
UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002, UVD_STATUS__UVD_BUSY = 0x00000004, GB_ADDR_CONFIG_DEFAULT = 0x26010011, @@ -158,7 +203,6 @@ struct amdgpu_vcn_reg{ unsigned ib_size; unsigned gp_scratch8; unsigned scratch9; - unsigned jpeg_pitch; }; struct amdgpu_vcn_inst { @@ -168,9 +212,17 @@ struct amdgpu_vcn_inst { void *saved_bo; struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; - struct amdgpu_ring ring_jpeg; + atomic_t sched_score; struct amdgpu_irq_src irq; struct amdgpu_vcn_reg external; + struct amdgpu_bo *dpg_sram_bo; + struct dpg_pause_state pause_state; + void *dpg_sram_cpu_addr; + uint64_t dpg_sram_gpu_addr; + uint32_t *dpg_sram_curr_addr; + atomic_t dpg_enc_submission_cnt; + void *fw_shared_cpu_addr; + uint64_t fw_shared_gpu_addr; }; struct amdgpu_vcn { @@ -179,21 +231,63 @@ struct amdgpu_vcn { const struct firmware *fw; /* VCN firmware */ unsigned num_enc_rings; enum amd_powergating_state cur_state; - struct dpg_pause_state pause_state; - bool indirect_sram; - struct amdgpu_bo *dpg_sram_bo; - void *dpg_sram_cpu_addr; - uint64_t dpg_sram_gpu_addr; - uint32_t *dpg_sram_curr_addr; uint8_t num_vcn_inst; - struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; - struct amdgpu_vcn_reg internal; + struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; + struct amdgpu_vcn_reg internal; + struct mutex vcn_pg_lock; + struct mutex vcn1_jpeg1_workaround; + atomic_t total_submission_cnt; unsigned harvest_config; int (*pause_dpg_mode)(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); + int inst_idx, struct dpg_pause_state *new_state); +}; + +struct amdgpu_fw_shared_rb_ptrs_struct { + /* to WA DPG R/W ptr issues.*/ + uint32_t rptr; + uint32_t wptr; +}; + +struct amdgpu_fw_shared_multi_queue { + uint8_t decode_queue_mode; + uint8_t encode_generalpurpose_queue_mode; + uint8_t encode_lowlatency_queue_mode; + uint8_t encode_realtime_queue_mode; + uint8_t padding[4]; +}; + +struct amdgpu_fw_shared_sw_ring { + uint8_t is_enabled; + uint8_t padding[3]; +}; + +struct amdgpu_fw_shared { + uint32_t present_flag_0; + uint8_t pad[44]; + struct amdgpu_fw_shared_rb_ptrs_struct rb; + uint8_t pad1[1]; + struct amdgpu_fw_shared_multi_queue multi_queue; + struct amdgpu_fw_shared_sw_ring sw_ring; +}; + +struct amdgpu_vcn_decode_buffer { + uint32_t valid_buf_flag; + uint32_t msg_buffer_address_hi; + uint32_t msg_buffer_address_lo; + uint32_t pad[30]; +}; + +#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80 +#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40 +#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0 + +enum vcn_ring_type { + VCN_ENCODE_RING, + VCN_DECODE_RING, + VCN_UNIFIED_RING, }; int amdgpu_vcn_sw_init(struct amdgpu_device *adev); @@ -203,13 +297,19 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev); void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring); +bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, + enum vcn_ring_type type, uint32_t vcn_instance); + int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); +int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring); +int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout); -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring); -int 
amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout); +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring); + +void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c index 7f7097931c6f..f9d3d79f68b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c @@ -59,7 +59,7 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev) return; } /* - TODO: Enable these code when pv2vf_info is merged + TODO: Enable these code when pv2vf_info is merged AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags); if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) { return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e32ae906d797..04cf9b207e62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -26,6 +26,16 @@ #include <drm/drm_drv.h> #include "amdgpu.h" +#include "amdgpu_ras.h" +#include "vi.h" +#include "soc15.h" +#include "nv.h" + +#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \ + do { \ + vf2pf_info->ucode_info[ucode].id = ucode; \ + vf2pf_info->ucode_info[ucode].version = ver; \ + } while (0) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) { @@ -37,106 +47,20 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) void amdgpu_virt_init_setting(struct amdgpu_device *adev) { + struct drm_device *ddev = adev_to_drm(adev); + /* enable virtual display */ - adev->mode_info.num_crtc = 1; - adev->enable_virtual_display = true; - adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC; + if (adev->asic_type != CHIP_ALDEBARAN && + adev->asic_type != CHIP_ARCTURUS) { + if (adev->mode_info.num_crtc == 0) + adev->mode_info.num_crtc = 1; + adev->enable_virtual_display = true; + } + ddev->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_rreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_rreg(ring, reg); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. 
- * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_read; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_read; - - return adev->wb.wb[adev->virt.reg_val_offs]; - -failed_kiq_read: - pr_err("failed to read reg:%x\n", reg); - return ~0; -} - -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_wreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_write; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_write; - - return; - -failed_kiq_write: - pr_err("failed to write reg:%x\n", reg); -} - void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) @@ -151,7 +75,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, amdgpu_ring_alloc(ring, 32); amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, ref, mask); - amdgpu_fence_emit_polling(ring, &seq); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + amdgpu_ring_commit(ring); spin_unlock_irqrestore(&kiq->ring_lock, flags); @@ -173,13 +100,16 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, return; +failed_undo: + amdgpu_ring_undo(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); failed_kiq: - pr_err("failed to write reg %x wait reg %x\n", reg0, reg1); + dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1); } /** * amdgpu_virt_request_full_gpu() - request full gpu access - * @amdgpu: amdgpu device. + * @adev: amdgpu device. * @init: is driver init time. * When start to init/fini driver, first need to request full gpu access. * Return: Zero if request success, otherwise will return error. @@ -202,7 +132,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init) /** * amdgpu_virt_release_full_gpu() - release full gpu access - * @amdgpu: amdgpu device. + * @adev: amdgpu device. * @init: is driver init time. * When finishing driver init/fini, need to release full gpu access. * Return: Zero if release success, otherwise will returen error. @@ -224,7 +154,7 @@ int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init) /** * amdgpu_virt_reset_gpu() - reset gpu - * @amdgpu: amdgpu device. + * @adev: amdgpu device. 
+ * @adev: amdgpu device. * Send reset command to GPU hypervisor to reset GPU that VM is using * Return: Zero if reset success, otherwise will return error. */ @@ -244,9 +174,22 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev) return 0; } +void amdgpu_virt_request_init_data(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + + if (virt->ops && virt->ops->req_init_data) + virt->ops->req_init_data(adev); + + if (adev->virt.req_init_data_ver > 0) + DRM_INFO("host supports REQ_INIT_DATA handshake\n"); + else + DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n"); +} + /** * amdgpu_virt_wait_reset() - wait for reset gpu completed - * @amdgpu: amdgpu device. + * @adev: amdgpu device. * Wait for GPU reset completed. * Return: Zero if reset success, otherwise will return error. */ @@ -262,7 +205,7 @@ int amdgpu_virt_wait_reset(struct amdgpu_device *adev) /** * amdgpu_virt_alloc_mm_table() - alloc memory for mm table - * @amdgpu: amdgpu device. + * @adev: amdgpu device. * MM table is used by UVD and VCE for its initialization * Return: Zero if allocate success. */ @@ -292,7 +235,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev) /** * amdgpu_virt_free_mm_table() - free mm table memory - * @amdgpu: amdgpu device. + * @adev: amdgpu device. * Free MM table memory */ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev) @@ -307,10 +250,10 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev) } -int amdgpu_virt_fw_reserve_get_checksum(void *obj, - unsigned long obj_size, - unsigned int key, - unsigned int chksum) +unsigned int amd_sriov_msg_checksum(void *obj, + unsigned long obj_size, + unsigned int key, + unsigned int checksum) { unsigned int ret = key; unsigned long i = 0; @@ -320,113 +263,546 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, /* calculate checksum */ for (i = 0; i < obj_size; ++i) ret += *(pos + i); - /* minus the chksum itself */ - pos = (char *)&chksum; - for (i = 0; i < sizeof(chksum); ++i) + /* minus the checksum itself */ + pos = (char *)&checksum; + for (i = 0; i < sizeof(checksum); ++i) ret -= *(pos + i); return ret; } -void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) +static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data; + /* GPU will be marked bad on host if bp count more than 10, + * so alloc 512 is enough.
+ */ + unsigned int align_space = 512; + void *bps = NULL; + struct amdgpu_bo **bps_bo = NULL; + + *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL); + if (!*data) + return -ENOMEM; + + bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL); + bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + + if (!bps || !bps_bo) { + kfree(bps); + kfree(bps_bo); + kfree(*data); + return -ENOMEM; + } + + (*data)->bps = bps; + (*data)->bps_bo = bps_bo; + (*data)->count = 0; + (*data)->last_reserved = 0; + + virt->ras_init_done = true; + + return 0; +} + +static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + struct amdgpu_bo *bo; + int i; + + if (!data) + return; + + for (i = data->last_reserved - 1; i >= 0; i--) { + bo = data->bps_bo[i]; + amdgpu_bo_free_kernel(&bo, NULL, NULL); + data->bps_bo[i] = bo; + data->last_reserved = i; + } +} + +void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + + virt->ras_init_done = false; + + if (!data) + return; + + amdgpu_virt_ras_release_bp(adev); + + kfree(data->bps); + kfree(data->bps_bo); + kfree(data); + virt->virt_eh_data = NULL; +} + +static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev, + struct eeprom_table_record *bps, int pages) +{ + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + + if (!data) + return; + + memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); + data->count += pages; +} + +static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + struct amdgpu_bo *bo = NULL; + uint64_t bp; + int i; + + if (!data) + return; + + for (i = data->last_reserved; i < data->count; i++) { + bp = data->bps[i].retired_page; + + /* There are two cases where a reserve error should be ignored: + * 1) a ras bad page has been allocated (used by someone); + * 2) a ras bad page has been reserved (duplicate error injection + * for one page); + */ + if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL)) + DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); + + data->bps_bo[i] = bo; + data->last_reserved = i + 1; + bo = NULL; + } +} + +static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t retired_page) { - uint32_t pf2vf_size = 0; - uint32_t checksum = 0; + struct amdgpu_virt *virt = &adev->virt; + struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data; + int i; + + if (!data) + return true; + + for (i = 0; i < data->count; i++) + if (retired_page == data->bps[i].retired_page) + return true; + + return false; +} + +static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev, + uint64_t bp_block_offset, uint32_t bp_block_size) +{ + struct eeprom_table_record bp; + uint64_t retired_page; + uint32_t bp_idx, bp_cnt; + + if (bp_block_size) { + bp_cnt = bp_block_size / sizeof(uint64_t); + for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) { + retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va + + bp_block_offset + bp_idx * sizeof(uint64_t)); + bp.retired_page = retired_page; + + if (amdgpu_virt_ras_check_bad_page(adev,
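/*
 * Illustrative sketch, not part of the patch: the count/last_reserved
 * two-index bookkeeping driving the RAS bad-page arrays above, with the BO
 * calls stubbed out. All names here are hypothetical. Recording and reserving
 * are separate passes, so reserve_bps() only touches the tail that earlier
 * passes have not covered, and release walks backwards the same way
 * amdgpu_virt_ras_release_bp() does.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_BPS 512			/* mirrors the align_space of 512 */

struct bad_page_state {
	uint64_t pages[MAX_BPS];	/* recorded retired pages */
	bool reserved[MAX_BPS];		/* stand-in for the bps_bo[] array */
	int count;			/* entries recorded so far */
	int last_reserved;		/* entries reserved so far */
};

static void add_bp(struct bad_page_state *s, uint64_t page)
{
	if (s->count < MAX_BPS)
		s->pages[s->count++] = page;
}

static void reserve_bps(struct bad_page_state *s)
{
	int i;

	for (i = s->last_reserved; i < s->count; i++) {
		s->reserved[i] = true;	/* amdgpu_bo_create_kernel_at() */
		s->last_reserved = i + 1;
	}
}

static void release_bps(struct bad_page_state *s)
{
	int i;

	for (i = s->last_reserved - 1; i >= 0; i--) {
		s->reserved[i] = false;	/* amdgpu_bo_free_kernel() */
		s->last_reserved = i;
	}
}

int main(void)
{
	struct bad_page_state s = { .count = 0, .last_reserved = 0 };

	add_bp(&s, 0x1000);
	add_bp(&s, 0x2000);
	reserve_bps(&s);		/* reserves entries 0 and 1 */
	add_bp(&s, 0x3000);
	reserve_bps(&s);		/* only entry 2 is new work */
	printf("count=%d last_reserved=%d\n", s.count, s.last_reserved);
	release_bps(&s);
	return 0;
}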
retired_page)) + continue; + + amdgpu_virt_ras_add_bps(adev, &bp, 1); + + amdgpu_virt_ras_reserve_bps(adev); + } + } +} + +static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf; + uint32_t checksum; uint32_t checkval; - char *str; + + uint32_t i; + uint32_t tmp; + + if (adev->virt.fw_reserve.p_pf2vf == NULL) + return -EINVAL; + + if (pf2vf_info->size > 1024) { + DRM_ERROR("invalid pf2vf message size\n"); + return -EINVAL; + } + + switch (pf2vf_info->version) { + case 1: + checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum; + checkval = amd_sriov_msg_checksum( + adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, + adev->virt.fw_reserve.checksum_key, checksum); + if (checksum != checkval) { + DRM_ERROR("invalid pf2vf message\n"); + return -EINVAL; + } + + adev->virt.gim_feature = + ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags; + break; + case 2: + /* TODO: missing key, need to add it later */ + checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum; + checkval = amd_sriov_msg_checksum( + adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, + 0, checksum); + if (checksum != checkval) { + DRM_ERROR("invalid pf2vf message\n"); + return -EINVAL; + } + + adev->virt.vf2pf_update_interval_ms = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms; + adev->virt.gim_feature = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all; + adev->virt.reg_access = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all; + + adev->virt.decode_max_dimension_pixels = 0; + adev->virt.decode_max_frame_pixels = 0; + adev->virt.encode_max_dimension_pixels = 0; + adev->virt.encode_max_frame_pixels = 0; + adev->virt.is_mm_bw_enabled = false; + for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) { + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels; + adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels; + adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels; + adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels; + adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels); + } + if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0)) + adev->virt.is_mm_bw_enabled = true; + + adev->unique_id = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; + break; + default: + DRM_ERROR("invalid pf2vf version\n"); + return -EINVAL; + } + + /* correct a too large or too small interval value */ + if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000) + adev->virt.vf2pf_update_interval_ms = 2000; + + return 0; +} + +static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_vf2pf_info *vf2pf_info; + vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf; + + if (adev->virt.fw_reserve.p_vf2pf == NULL) + return; + + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version); +
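/*
 * Illustrative sketch, not part of the patch: the version dispatch over a
 * shared header that amdgpu_virt_read_pf2vf_data() performs, plus the
 * interval clamp applied at its end. The structs are toy stand-ins and the
 * checksum step is elided (see the sketch after the checksum helper above).
 */
#include <errno.h>
#include <stdint.h>

struct hdr { uint32_t size; uint32_t version; };
struct msg_v1 { struct hdr header; uint32_t feature_flags; uint32_t checksum; };
struct msg_v2 { struct hdr header; uint32_t checksum; uint32_t interval_ms; };

static int parse_pf2vf(const struct hdr *h, uint32_t *interval_ms)
{
	if (!h || h->size > 1024)	/* same sanity bound as the patch */
		return -EINVAL;

	switch (h->version) {
	case 1:
		/* v1 carries no interval; keep the caller's default */
		break;
	case 2:
		*interval_ms = ((const struct msg_v2 *)h)->interval_ms;
		break;
	default:
		return -EINVAL;
	}

	/* anything outside [200, 10000] ms falls back to the 2 s default */
	if (*interval_ms < 200 || *interval_ms > 10000)
		*interval_ms = 2000;
	return 0;
}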
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, + adev->psp.asd_context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, + adev->psp.ras_context.context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, + adev->psp.xgmi_context.context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version); +} + +static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_vf2pf_info *vf2pf_info; + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + + vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf; + + if (adev->virt.fw_reserve.p_vf2pf == NULL) + return -EINVAL; + + memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info)); + + vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info); + vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER; + +#ifdef MODULE + if (THIS_MODULE->version != NULL) + strcpy(vf2pf_info->driver_version, THIS_MODULE->version); + else +#endif + strcpy(vf2pf_info->driver_version, "N/A"); + + vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all + vf2pf_info->driver_cert = 0; + vf2pf_info->os_info.all = 0; + + vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20; + vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20; + vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; + vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20; + + amdgpu_virt_populate_vf2pf_ucode_info(adev); + + /* TODO: read dynamic info */ + vf2pf_info->gfx_usage = 0; + vf2pf_info->compute_usage = 0; + vf2pf_info->encode_usage = 0; + vf2pf_info->decode_usage = 0; + + vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr; + vf2pf_info->checksum = + amd_sriov_msg_checksum( + vf2pf_info, vf2pf_info->header.size, 0, 0); + + return 0; +} + +static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work); + int ret; + + ret = 
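/*
 * The POPULATE_UCODE_INFO() definition is not visible in this hunk. A
 * plausible shape, assuming the vf2pf message carries an ucode_info[] table
 * indexed by firmware id; treat this as an illustration rather than the
 * authoritative definition:
 */
#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		(vf2pf_info)->ucode_info[ucode].id = (ucode); \
		(vf2pf_info)->ucode_info[ucode].version = (ver); \
	} while (0)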
amdgpu_virt_read_pf2vf_data(adev); + if (ret) + goto out; + amdgpu_virt_write_vf2pf_data(adev); + +out: + schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); +} + +void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) +{ + if (adev->virt.vf2pf_update_interval_ms != 0) { + DRM_INFO("clean up the vf2pf work item\n"); + cancel_delayed_work_sync(&adev->virt.vf2pf_work); + adev->virt.vf2pf_update_interval_ms = 0; + } +} + +void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; + adev->virt.vf2pf_update_interval_ms = 0; + + if (adev->mman.fw_vram_usage_va != NULL) { + adev->virt.vf2pf_update_interval_ms = 2000; - if (adev->fw_vram_usage.va != NULL) { adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *)( - adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET); - AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); - AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); - AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature); - - /* pf2vf message must be in 4K */ - if (pf2vf_size > 0 && pf2vf_size < 4096) { - checkval = amdgpu_virt_fw_reserve_get_checksum( - adev->virt.fw_reserve.p_pf2vf, pf2vf_size, - adev->virt.fw_reserve.checksum_key, checksum); - if (checkval == checksum) { - adev->virt.fw_reserve.p_vf2pf = - ((void *)adev->virt.fw_reserve.p_pf2vf + - pf2vf_size); - memset((void *)adev->virt.fw_reserve.p_vf2pf, 0, - sizeof(amdgim_vf2pf_info)); - AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version, - AMDGPU_FW_VRAM_VF2PF_VER); - AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size, - sizeof(amdgim_vf2pf_info)); - AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version, - &str); -#ifdef MODULE - if (THIS_MODULE->version != NULL) - strcpy(str, THIS_MODULE->version); - else -#endif - strcpy(str, "N/A"); - AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert, - 0); - AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum, - amdgpu_virt_fw_reserve_get_checksum( - adev->virt.fw_reserve.p_vf2pf, - pf2vf_size, - adev->virt.fw_reserve.checksum_key, 0)); + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + amdgpu_virt_write_vf2pf_data(adev); + + /* bad page handling for version 2 */ + if (adev->virt.fw_reserve.p_pf2vf->version == 2) { + pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf; + + bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) | + ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000); + bp_block_size = pf2vf_v2->bp_block_size; + + if (bp_block_size && !adev->virt.ras_init_done) + amdgpu_virt_init_ras_err_handler_data(adev); + + if (adev->virt.ras_init_done) + amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } + } else if (adev->bios != NULL) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + + return; + } + + if (adev->virt.vf2pf_update_interval_ms != 0) { + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), 
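/*
 * Illustrative sketch, not part of the patch: the self-rescheduling
 * delayed-work pattern used by amdgpu_virt_update_vf2pf_work_item() above,
 * reduced to a skeleton with hypothetical names. One detail worth noting:
 * schedule_delayed_work() takes its delay in jiffies, so a caller holding a
 * period in milliseconds would normally convert with msecs_to_jiffies().
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct heartbeat {
	struct delayed_work work;
	unsigned int interval_ms;
};

static void heartbeat_fn(struct work_struct *work)
{
	struct heartbeat *hb = container_of(work, struct heartbeat, work.work);

	/* ... exchange data with the host here ... */

	/* re-arm ourselves, exactly like the vf2pf work item */
	schedule_delayed_work(&hb->work, msecs_to_jiffies(hb->interval_ms));
}

static void heartbeat_start(struct heartbeat *hb, unsigned int interval_ms)
{
	hb->interval_ms = interval_ms;
	INIT_DELAYED_WORK(&hb->work, heartbeat_fn);
	schedule_delayed_work(&hb->work, msecs_to_jiffies(hb->interval_ms));
}

static void heartbeat_stop(struct heartbeat *hb)
{
	/*
	 * cancel_delayed_work_sync() waits out a running instance; because
	 * the handler re-arms itself, real code also clears the re-arm
	 * condition, as the patch does by zeroing vf2pf_update_interval_ms.
	 */
	cancel_delayed_work_sync(&hb->work);
}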
adev->virt.vf2pf_update_interval_ms); + } +} + +void amdgpu_detect_virtualization(struct amdgpu_device *adev) +{ + uint32_t reg; + + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_FIJI: + reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + break; + case CHIP_VEGA10: + case CHIP_VEGA20: + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: + case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: + reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER); + break; + default: /* other chip doesn't support SRIOV */ + reg = 0; + break; + } + + if (reg & 1) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + + if (!reg) { + if (is_virtual_machine()) /* passthrough mode excludes sriov mode */ + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + } + + /* we have the ability to check now */ + if (amdgpu_sriov_vf(adev)) { + switch (adev->asic_type) { + case CHIP_TONGA: + case CHIP_FIJI: + vi_set_virt_ops(adev); + break; + case CHIP_VEGA10: + case CHIP_VEGA20: + case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: + soc15_set_virt_ops(adev); + break; + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: + nv_set_virt_ops(adev); + /* try to send GPU_INIT_DATA request to host */ + amdgpu_virt_request_init_data(adev); + break; + default: /* other chip doesn't support SRIOV */ + DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type); + break; } } } -static uint32_t parse_clk(char *buf, bool min) +static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev) { - char *ptr = buf; - uint32_t clk = 0; - - do { - ptr = strchr(ptr, ':'); - if (!ptr) - break; - ptr+=2; - if (kstrtou32(ptr, 10, &clk)) - return 0; - } while (!min); - - return clk * 100; + return amdgpu_sriov_is_debug(adev) ? true : false; } -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) +static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev) { - char *buf = NULL; - uint32_t clk = 0; + return amdgpu_sriov_is_normal(adev) ?
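/*
 * Illustrative sketch, not part of the patch: how the IOV function identifier
 * register read in amdgpu_detect_virtualization() decodes. Bit 0 marks the
 * function as a VF, bit 31 reports SR-IOV enablement, and a zero register on
 * a machine already known to be virtual implies passthrough. The cap names
 * below are local stand-ins for the AMDGPU_SRIOV_CAPS_* bits.
 */
#include <stdbool.h>
#include <stdint.h>

#define CAP_IS_VF	(1u << 0)
#define CAP_ENABLE_IOV	(1u << 1)
#define CAP_PASSTHROUGH	(1u << 2)

static uint32_t decode_iov_identifier(uint32_t reg, bool in_vm)
{
	uint32_t caps = 0;

	if (reg & 0x1)
		caps |= CAP_IS_VF;		/* this function is a VF */
	if (reg & 0x80000000)
		caps |= CAP_ENABLE_IOV;		/* SR-IOV enabled on the device */
	if (!reg && in_vm)
		caps |= CAP_PASSTHROUGH;	/* whole GPU handed to the VM */
	return caps;
}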
true : false; +} - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; +int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev) +{ + if (!amdgpu_sriov_vf(adev) || + amdgpu_virt_access_debugfs_is_kiq(adev)) + return 0; - adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); - clk = parse_clk(buf, lowest); + if (amdgpu_virt_access_debugfs_is_mmio(adev)) + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + else + return -EPERM; - kfree(buf); + return 0; +} - return clk; +void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME; } -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) +enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev) { - char *buf = NULL; - uint32_t clk = 0; + enum amdgpu_sriov_vf_mode mode; + + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_sriov_is_pp_one_vf(adev)) + mode = SRIOV_VF_MODE_ONE_VF; + else + mode = SRIOV_VF_MODE_MULTI_VF; + } else { + mode = SRIOV_VF_MODE_BARE_METAL; + } - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; + return mode; +} - adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); - clk = parse_clk(buf, lowest); +void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, + struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, + struct amdgpu_video_codec_info *decode, uint32_t decode_array_size) +{ + uint32_t i; - kfree(buf); + if (!adev->virt.is_mm_bw_enabled) + return; - return clk; + if (encode) { + for (i = 0; i < encode_array_size; i++) { + encode[i].max_width = adev->virt.encode_max_dimension_pixels; + encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels; + if (encode[i].max_width > 0) + encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width; + else + encode[i].max_height = 0; + } + } + + if (decode) { + for (i = 0; i < decode_array_size; i++) { + decode[i].max_width = adev->virt.decode_max_dimension_pixels; + decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels; + if (decode[i].max_width > 0) + decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width; + else + decode[i].max_height = 0; + } + } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b0b2bdc750df..8d4c20bb71c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -24,12 +24,25 @@ #ifndef AMDGPU_VIRT_H #define AMDGPU_VIRT_H +#include "amdgv_sriovmsg.h" + #define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ #define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +/* all asics after AI use this offset */ +#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 +/* tonga/fiji use this offset */ +#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 + +enum amdgpu_sriov_vf_mode { + SRIOV_VF_MODE_BARE_METAL = 0, + SRIOV_VF_MODE_ONE_VF, + SRIOV_VF_MODE_MULTI_VF, +}; + struct amdgpu_mm_table { struct amdgpu_bo *bo; uint32_t *cpu_addr; @@ -54,11 +67,10 @@ struct amdgpu_vf_error_buffer { struct amdgpu_virt_ops { int (*req_full_gpu)(struct amdgpu_device *adev, bool init); int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); + int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); - int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); - int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); }; /* @@ -69,7 +81,10 @@ struct amdgpu_virt_fw_reserve { struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; unsigned int checksum_key; }; + /* + * Legacy GIM header + * * Definition between PF and VF * Structures forcibly aligned to 4 to keep the same style as PF. */ @@ -85,19 +100,24 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, /* VRAM LOST by GIM */ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, - /* HW PERF SIM in GIM */ - AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), + /* MM bandwidth */ + AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8, + /* PP ONE VF MODE in GIM */ + AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), + /* Indirect Reg Access enabled */ + AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5), }; -struct amd_sriov_msg_pf2vf_info_header { - /* the total structure size in byte. */ - uint32_t size; - /* version of this structure, written by the GIM */ - uint32_t version; - /* reserved */ - uint32_t reserved[2]; -} __aligned(4); -struct amdgim_pf2vf_info_v1 { +enum AMDGIM_REG_ACCESS_FLAG { + /* Use PSP to program IH_RB_CNTL */ + AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0), + /* Use RLC to program MMHUB regs */ + AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1), + /* Use RLC to program GC regs */ + AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2), +}; + +struct amdgim_pf2vf_info_v1 { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; /* max_width * max_height */ @@ -116,46 +136,6 @@ struct amdgim_pf2vf_info_v1 { unsigned int checksum; } __aligned(4); -struct amdgim_pf2vf_info_v2 { - /* header contains size and version */ - struct amd_sriov_msg_pf2vf_info_header header; - /* use private key from mailbox 2 to create chueksum */ - uint32_t checksum; - /* The features flags of the GIM driver supports. */ - uint32_t feature_flags; - /* max_width * max_height */ - uint32_t uvd_enc_max_pixels_count; - /* 16x16 pixels/sec, codec independent */ - uint32_t uvd_enc_max_bandwidth; - /* max_width * max_height */ - uint32_t vce_enc_max_pixels_count; - /* 16x16 pixels/sec, codec independent */ - uint32_t vce_enc_max_bandwidth; - /* MEC FW position in kb from the start of VF visible frame buffer */ - uint64_t mecfw_kboffset; - /* MEC FW size in KB */ - uint32_t mecfw_ksize; - /* UVD FW position in kb from the start of VF visible frame buffer */ - uint64_t uvdfw_kboffset; - /* UVD FW size in KB */ - uint32_t uvdfw_ksize; - /* VCE FW position in kb from the start of VF visible frame buffer */ - uint64_t vcefw_kboffset; - /* VCE FW size in KB */ - uint32_t vcefw_ksize; - uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)]; -} __aligned(4); - - -struct amd_sriov_msg_vf2pf_info_header { - /* the total structure size in byte.
*/ - uint32_t size; - /*version of this structure, written by the guest */ - uint32_t version; - /* reserved */ - uint32_t reserved[2]; -} __aligned(4); - struct amdgim_vf2pf_info_v1 { /* header contains size and version */ struct amd_sriov_msg_vf2pf_info_header header; @@ -217,30 +197,16 @@ struct amdgim_vf2pf_info_v2 { uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; } __aligned(4); -#define AMDGPU_FW_VRAM_VF2PF_VER 2 -typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ; - -#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \ - do { \ - ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \ - } while (0) - -#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \ - do { \ - (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \ - } while (0) - -#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \ - do { \ - if (!adev->virt.fw_reserve.p_pf2vf) \ - *(val) = 0; \ - else { \ - if (adev->virt.fw_reserve.p_pf2vf->version == 1) \ - *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \ - if (adev->virt.fw_reserve.p_pf2vf->version == 2) \ - *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \ - } \ - } while (0) +struct amdgpu_virt_ras_err_handler_data { + /* point to bad page records array */ + struct eeprom_table_record *bps; + /* point to reserved bo array */ + struct amdgpu_bo **bps_bo; + /* the count of entries */ + int count; + /* last reserved entry's index + 1 */ + int last_reserved; +}; /* GPU virtualization */ struct amdgpu_virt { @@ -254,14 +220,30 @@ struct amdgpu_virt { struct work_struct flr_work; struct amdgpu_mm_table mm_table; const struct amdgpu_virt_ops *ops; - struct amdgpu_vf_error_buffer vf_errors; + struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; uint32_t gim_feature; - /* protect DPM events to GIM */ - struct mutex dpm_mutex; uint32_t reg_access_mode; + int req_init_data_ver; + bool tdr_debug; + struct amdgpu_virt_ras_err_handler_data *virt_eh_data; + bool ras_init_done; + uint32_t reg_access; + + /* vf2pf message */ + struct delayed_work vf2pf_work; + uint32_t vf2pf_update_interval_ms; + + /* multimedia bandwidth config */ + bool is_mm_bw_enabled; + uint32_t decode_max_dimension_pixels; + uint32_t decode_max_frame_pixels; + uint32_t encode_max_dimension_pixels; + uint32_t encode_max_frame_pixels; }; +struct amdgpu_video_codec_info; + #define amdgpu_sriov_enabled(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) @@ -274,6 +256,25 @@ struct amdgpu_virt { #define amdgpu_sriov_runtime(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME) +#define amdgpu_sriov_fullaccess(adev) \ +(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev))) + +#define amdgpu_sriov_reg_indirect_en(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS))) + +#define amdgpu_sriov_reg_indirect_ih(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN))) + +#define amdgpu_sriov_reg_indirect_mmhub(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN))) + +#define amdgpu_sriov_reg_indirect_gc(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) @@ -286,26 +287,37 @@ static inline bool is_virtual_machine(void) 
#endif } -#define amdgim_is_hwperf(adev) \ - ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) +#define amdgpu_sriov_is_pp_one_vf(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) +#define amdgpu_sriov_is_debug(adev) \ + ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug) +#define amdgpu_sriov_is_normal(adev) \ + ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug)) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, uint32_t ref, uint32_t mask); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); +void amdgpu_virt_request_init_data(struct amdgpu_device *adev); int amdgpu_virt_wait_reset(struct amdgpu_device *adev); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); -int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, - unsigned int key, - unsigned int chksum); +void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); +void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); +void amdgpu_detect_virtualization(struct amdgpu_device *adev); + +bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); +int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); +void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); + +enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev); + +void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, + struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, + struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c new file mode 100644 index 000000000000..ce982afeff91 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_simple_kms_helper.h> +#include <drm/drm_vblank.h> + +#include "amdgpu.h" +#ifdef CONFIG_DRM_AMDGPU_SI +#include "dce_v6_0.h" +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "dce_v8_0.h" +#endif +#include "dce_v10_0.h" +#include "dce_v11_0.h" +#include "ivsrcid/ivsrcid_vislands30.h" +#include "amdgpu_vkms.h" +#include "amdgpu_display.h" + +/** + * DOC: amdgpu_vkms + * + * The amdgpu vkms interface provides a virtual KMS interface for several use + * cases: devices without display hardware, platforms where the actual display + * hardware is not useful (e.g., servers), SR-IOV virtual functions, device + * emulation/simulation, and device bring up prior to display hardware being + * usable. We previously emulated a legacy KMS interface, but there was a desire + * to move to the atomic KMS interface. 
The vkms driver did everything we + * needed, but we wanted KMS support natively in the driver without buffer + * sharing and the ability to support an instance of VKMS per device. We first + * looked at splitting vkms into a stub driver and a helper module that other + * drivers could use to implement a virtual display, but this strategy ended up + * being messy due to driver specific callbacks needed for buffer management. + * Ultimately, it proved easier to import the vkms code as it mostly used core + * drm helpers anyway. + */ + +static const u32 amdgpu_vkms_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) +{ + struct amdgpu_vkms_output *output = container_of(timer, + struct amdgpu_vkms_output, + vblank_hrtimer); + struct drm_crtc *crtc = &output->crtc; + u64 ret_overrun; + bool ret; + + ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + output->period_ns); + WARN_ON(ret_overrun != 1); + + ret = drm_crtc_handle_vblank(crtc); + if (!ret) + DRM_ERROR("amdgpu_vkms failure on handling vblank"); + + return HRTIMER_RESTART; +} + +static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + + drm_calc_timestamping_constants(crtc, &crtc->mode); + + hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; + out->period_ns = ktime_set(0, vblank->framedur_ns); + hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); + + return 0; +} + +static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) +{ + struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + + hrtimer_cancel(&out->vblank_hrtimer); +} + +static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = crtc->index; + struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + + if (!READ_ONCE(vblank->enabled)) { + *vblank_time = ktime_get(); + return true; + } + + *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); + + if (WARN_ON(*vblank_time == vblank->time)) + return true; + + /* + * To prevent races we roll the hrtimer forward before we do any + * interrupt processing - this is how real hw works (the interrupt is + * only generated after all the vblank registers are updated) and what + * the vblank core expects. Therefore we need to always correct the + * timestamp by one frame.
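/*
 * Illustrative sketch, not part of the patch: the hrtimer idiom behind the
 * simulated vblank above, with hypothetical names. The timer is rolled
 * forward before the event is handled, so the timestamp of the event just
 * delivered is always "expiry minus one period", which is exactly why
 * amdgpu_vkms_get_vblank_timestamp() subtracts period_ns.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct sim_vblank {
	struct hrtimer timer;
	ktime_t period_ns;
};

static enum hrtimer_restart sim_vblank_tick(struct hrtimer *t)
{
	struct sim_vblank *sv = container_of(t, struct sim_vblank, timer);

	/*
	 * Advance past now; the return value counts elapsed periods and is 1
	 * whenever we keep up, hence the WARN_ON in the patch.
	 */
	hrtimer_forward_now(t, sv->period_ns);

	/* ... deliver the event, drm_crtc_handle_vblank() in the patch ... */

	return HRTIMER_RESTART;
}

static void sim_vblank_start(struct sim_vblank *sv, u64 framedur_ns)
{
	sv->period_ns = ktime_set(0, framedur_ns);
	hrtimer_init(&sv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sv->timer.function = sim_vblank_tick;
	hrtimer_start(&sv->timer, sv->period_ns, HRTIMER_MODE_REL);
}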
+ */ + *vblank_time -= output->period_ns; + + return true; +} + +static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = amdgpu_vkms_enable_vblank, + .disable_vblank = amdgpu_vkms_disable_vblank, + .get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp, +}; + +static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + drm_crtc_vblank_on(crtc); +} + +static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + drm_crtc_vblank_off(crtc); +} + +static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + if (crtc->state->event) { + spin_lock(&crtc->dev->event_lock); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + else + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + + spin_unlock(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } +} + +static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { + .atomic_flush = amdgpu_vkms_crtc_atomic_flush, + .atomic_enable = amdgpu_vkms_crtc_atomic_enable, + .atomic_disable = amdgpu_vkms_crtc_atomic_disable, +}; + +static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, struct drm_plane *cursor) +{ + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &amdgpu_vkms_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("Failed to init CRTC\n"); + return ret; + } + + drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); + + return ret; +} + +static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode = NULL; + unsigned i; + static const struct mode_size { + int w; + int h; + } common_modes[] = { + { 640, 480}, + { 720, 480}, + { 800, 600}, + { 848, 480}, + {1024, 768}, + {1152, 768}, + {1280, 720}, + {1280, 800}, + {1280, 854}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1400, 1050}, + {1680, 1050}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200}, + {2560, 1440}, + {4096, 3112}, + {3656, 2664}, + {3840, 2160}, + {4096, 2160}, + }; + + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { + mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); + drm_mode_probed_add(connector, mode); + } + + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + + return ARRAY_SIZE(common_modes); +} + +static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = { + .get_modes = amdgpu_vkms_conn_get_modes, +}; + +static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = 
drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *old_state) +{ + return; +} + +static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc_state *crtc_state; + int ret; + + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); + if (ret != 0) + return ret; + + /* for now primary plane must be visible and full screen */ + if (!new_plane_state->visible) + return -EINVAL; + + return 0; +} + +static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + struct list_head list; + struct ttm_validate_buffer tv; + struct ww_acquire_ctx ticket; + uint32_t domain; + int r; + + if (!new_state->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + afb = to_amdgpu_framebuffer(new_state->fb); + obj = new_state->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + INIT_LIST_HEAD(&list); + + tv.bo = &rbo->tbo; + tv.num_shared = 1; + list_add(&tv.head, &list); + + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + if (plane->type != DRM_PLANE_TYPE_CURSOR) + domain = amdgpu_display_supported_domains(adev, rbo->flags); + else + domain = AMDGPU_GEM_DOMAIN_VRAM; + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + ttm_eu_backoff_reservation(&ticket, &list); + return r; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + amdgpu_bo_unpin(rbo); + ttm_eu_backoff_reservation(&ticket, &list); + DRM_ERROR("%p bind failed\n", rbo); + return r; + } + + ttm_eu_backoff_reservation(&ticket, &list); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + return 0; +} + +static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct amdgpu_bo *rbo; + int r; + + if (!old_state->fb) + return; + + rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = { + .atomic_update = amdgpu_vkms_plane_atomic_update, + .atomic_check = amdgpu_vkms_plane_atomic_check, + .prepare_fb = amdgpu_vkms_prepare_fb, + .cleanup_fb = amdgpu_vkms_cleanup_fb, +}; + +static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, + enum drm_plane_type type, + int index) +{ + struct drm_plane *plane; + int ret; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + ret = drm_universal_plane_init(dev, plane, 1 << index, + &amdgpu_vkms_plane_funcs, + amdgpu_vkms_formats, 
+ ARRAY_SIZE(amdgpu_vkms_formats), + NULL, type, NULL); + if (ret) { + kfree(plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs); + + return plane; +} + +int amdgpu_vkms_output_init(struct drm_device *dev, + struct amdgpu_vkms_output *output, int index) +{ + struct drm_connector *connector = &output->connector; + struct drm_encoder *encoder = &output->encoder; + struct drm_crtc *crtc = &output->crtc; + struct drm_plane *primary, *cursor = NULL; + int ret; + + primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor); + if (ret) + goto err_crtc; + + ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) { + DRM_ERROR("Failed to init connector\n"); + goto err_connector; + } + + drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs); + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); + if (ret) { + DRM_ERROR("Failed to init encoder\n"); + goto err_encoder; + } + encoder->possible_crtcs = 1 << index; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + goto err_attach; + } + + drm_mode_config_reset(dev); + + return 0; + +err_attach: + drm_encoder_cleanup(encoder); + +err_encoder: + drm_connector_cleanup(connector); + +err_connector: + drm_crtc_cleanup(crtc); + +err_crtc: + drm_plane_cleanup(primary); + + return ret; +} + +const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = { + .fb_create = amdgpu_display_user_framebuffer_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int amdgpu_vkms_sw_init(void *handle) +{ + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev_to_drm(adev)->max_vblank_count = 0; + + adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs; + + adev_to_drm(adev)->mode_config.max_width = XRES_MAX; + adev_to_drm(adev)->mode_config.max_height = YRES_MAX; + + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; + + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; + + r = amdgpu_display_modeset_create_props(adev); + if (r) + return r; + + adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); + if (!adev->amdgpu_vkms_output) + return -ENOMEM; + + /* allocate crtcs, encoders, connectors */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i); + if (r) + return r; + } + + drm_kms_helper_poll_init(adev_to_drm(adev)); + + adev->mode_info.mode_config_initialized = true; + return 0; +} + +static int amdgpu_vkms_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i = 0; + + for (i = 0; i < adev->mode_info.num_crtc; i++) + if (adev->mode_info.crtcs[i]) + hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); + + kfree(adev->mode_info.bios_hardcoded_edid); + kfree(adev->amdgpu_vkms_output); + + drm_kms_helper_poll_fini(adev_to_drm(adev)); + + adev->mode_info.mode_config_initialized = false; + return 0; +} + +static int amdgpu_vkms_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case 
CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + dce_v6_0_disable_dce(adev); + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + dce_v8_0_disable_dce(adev); + break; +#endif + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_VEGAM: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + /* no DCE */ + break; + default: + break; + } + return 0; +} + +static int amdgpu_vkms_hw_fini(void *handle) +{ + return 0; +} + +static int amdgpu_vkms_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = drm_mode_config_helper_suspend(adev_to_drm(adev)); + if (r) + return r; + return amdgpu_vkms_hw_fini(handle); +} + +static int amdgpu_vkms_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_vkms_hw_init(handle); + if (r) + return r; + return drm_mode_config_helper_resume(adev_to_drm(adev)); +} + +static bool amdgpu_vkms_is_idle(void *handle) +{ + return true; +} + +static int amdgpu_vkms_wait_for_idle(void *handle) +{ + return 0; +} + +static int amdgpu_vkms_soft_reset(void *handle) +{ + return 0; +} + +static int amdgpu_vkms_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int amdgpu_vkms_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = { + .name = "amdgpu_vkms", + .early_init = NULL, + .late_init = NULL, + .sw_init = amdgpu_vkms_sw_init, + .sw_fini = amdgpu_vkms_sw_fini, + .hw_init = amdgpu_vkms_hw_init, + .hw_fini = amdgpu_vkms_hw_fini, + .suspend = amdgpu_vkms_suspend, + .resume = amdgpu_vkms_resume, + .is_idle = amdgpu_vkms_is_idle, + .wait_for_idle = amdgpu_vkms_wait_for_idle, + .soft_reset = amdgpu_vkms_soft_reset, + .set_clockgating_state = amdgpu_vkms_set_clockgating_state, + .set_powergating_state = amdgpu_vkms_set_powergating_state, +}; + +const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_vkms_ip_funcs, +}; + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h new file mode 100644 index 000000000000..97f1b79c0724 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _AMDGPU_VKMS_H_ +#define _AMDGPU_VKMS_H_ + +#define XRES_DEF 1024 +#define YRES_DEF 768 + +#define XRES_MAX 16384 +#define YRES_MAX 16384 + +#define drm_crtc_to_amdgpu_vkms_output(target) \ + container_of(target, struct amdgpu_vkms_output, crtc) + +extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; + +struct amdgpu_vkms_output { + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + struct hrtimer vblank_hrtimer; + ktime_t period_ns; + struct drm_pending_vblank_event *event; +}; + +#endif /* _AMDGPU_VKMS_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 598c24505c73..0e7dc23f78e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,16 +25,22 @@ * Alex Deucher * Jerome Glisse */ + #include <linux/dma-fence-array.h> 
#include <linux/interval_tree_generic.h> #include <linux/idr.h> +#include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_gmc.h" #include "amdgpu_xgmi.h" +#include "amdgpu_dma_buf.h" +#include "amdgpu_res_cursor.h" +#include "kfd_svm.h" /** * DOC: GPUVM @@ -83,6 +89,72 @@ struct amdgpu_prt_cb { }; /** + * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping + * + * @adev: amdgpu_device pointer + * @vm: amdgpu_vm pointer + * @pasid: the pasid the VM is using on this GPU + * + * Set the pasid this VM is using on this GPU, can also be used to remove the + * pasid by passing in zero. + * + */ +int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, + u32 pasid) +{ + int r; + + if (vm->pasid == pasid) + return 0; + + if (vm->pasid) { + r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); + if (r < 0) + return r; + + vm->pasid = 0; + } + + if (pasid) { + r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, + GFP_KERNEL)); + if (r < 0) + return r; + + vm->pasid = pasid; + } + + + return 0; +} + +/* + * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS + * happens while holding this lock anywhere to prevent deadlocks when + * an MMU notifier runs in reclaim-FS context. + */ +static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) +{ + mutex_lock(&vm->eviction_lock); + vm->saved_flags = memalloc_noreclaim_save(); +} + +static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) +{ + if (mutex_trylock(&vm->eviction_lock)) { + vm->saved_flags = memalloc_noreclaim_save(); + return 1; + } + return 0; +} + +static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) +{ + memalloc_noreclaim_restore(vm->saved_flags); + mutex_unlock(&vm->eviction_lock); +} + +/** * amdgpu_vm_level_shift - return the addr shift for each level * * @adev: amdgpu_device pointer @@ -94,23 +166,17 @@ struct amdgpu_prt_cb { static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, unsigned level) { - unsigned shift = 0xff; - switch (level) { case AMDGPU_VM_PDB2: case AMDGPU_VM_PDB1: case AMDGPU_VM_PDB0: - shift = 9 * (AMDGPU_VM_PDB0 - level) + + return 9 * (AMDGPU_VM_PDB0 - level) + adev->vm_manager.block_size; - break; case AMDGPU_VM_PTB: - shift = 0; - break; + return 0; default: - dev_err(adev->dev, "the level%d isn't supported.\n", level); + return ~0; } - - return shift; } /** @@ -209,19 +275,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) else list_move_tail(&vm_bo->vm_status, &vm->evicted); } - -/** - * amdgpu_vm_bo_relocated - vm_bo is reloacted - * - * @vm_bo: vm_bo which is relocated - * - * State for PDs/PTs which needs to update their parent PD. - */ -static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) -{ - list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); -} - /** * amdgpu_vm_bo_moved - vm_bo is moved * @@ -265,6 +318,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) } /** + * amdgpu_vm_bo_relocated - vm_bo is relocated + * + * @vm_bo: vm_bo which is relocated + * + * State for PDs/PTs which needs to update their parent PD. + * For the root PD, just move to idle state.
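/*
 * Illustrative sketch, not part of the patch: the mutex plus
 * memalloc_noreclaim_save() pattern used by the eviction lock above,
 * generalized with hypothetical names. While the lock is held, allocations
 * cannot recurse into reclaim, so an MMU notifier that runs in reclaim-FS
 * context can take the same lock without risking deadlock.
 */
#include <linux/mutex.h>
#include <linux/sched/mm.h>

struct guarded {
	struct mutex lock;
	unsigned int noreclaim_flags;
};

static void guarded_lock(struct guarded *g)
{
	mutex_lock(&g->lock);
	/* from here on, allocations behave as if __GFP_DIRECT_RECLAIM were clear */
	g->noreclaim_flags = memalloc_noreclaim_save();
}

static void guarded_unlock(struct guarded *g)
{
	memalloc_noreclaim_restore(g->noreclaim_flags);
	mutex_unlock(&g->lock);
}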
+ */ +static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) +{ + if (vm_bo->bo->parent) + list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); + else + amdgpu_vm_bo_idle(vm_bo); +} + +/** * amdgpu_vm_bo_done - vm_bo is done * * @vm_bo: vm_bo which is now done @@ -275,7 +344,7 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) { spin_lock(&vm_bo->vm->invalidated_lock); - list_del_init(&vm_bo->vm_status); + list_move(&vm_bo->vm_status, &vm_bo->vm->done); spin_unlock(&vm_bo->vm->invalidated_lock); } @@ -303,7 +372,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) return; vm->bulk_moveable = false; @@ -313,7 +382,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, amdgpu_vm_bo_idle(base); if (bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) + amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) return; /* @@ -332,14 +401,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * Helper to get the parent entry for the child page table. NULL if we are at * the root page directory. */ -static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) +static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt) { - struct amdgpu_bo *parent = pt->base.bo->parent; + struct amdgpu_bo *parent = pt->bo->parent; if (!parent) return NULL; - return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); + return parent->vm_bo; } /* @@ -347,8 +416,8 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) */ struct amdgpu_vm_pt_cursor { uint64_t pfn; - struct amdgpu_vm_pt *parent; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *parent; + struct amdgpu_vm_bo_base *entry; unsigned level; }; @@ -387,17 +456,17 @@ static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, { unsigned mask, shift, idx; - if (!cursor->entry->entries) + if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry || + !cursor->entry->bo) return false; - BUG_ON(!cursor->entry->base.bo); mask = amdgpu_vm_entries_mask(adev, cursor->level); shift = amdgpu_vm_level_shift(adev, cursor->level); ++cursor->level; idx = (cursor->pfn >> shift) & mask; cursor->parent = cursor->entry; - cursor->entry = &cursor->entry->entries[idx]; + cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx]; return true; } @@ -424,7 +493,7 @@ static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, shift = amdgpu_vm_level_shift(adev, cursor->level - 1); num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); - if (cursor->entry == &cursor->parent->entries[num_entries - 1]) + if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1]) return false; cursor->pfn += 1ULL << shift; @@ -510,7 +579,7 @@ static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, * True when the search should continue, false otherwise. 
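/*
 * Illustrative sketch, not part of the patch: the traversal order the page
 * table cursor above implements, rewritten over a toy tree. The safe DFS
 * variant yields children before their parents (post-order), which is what
 * allows the VM code to free page tables while walking; the descend, sibling
 * and ascend steps correspond to the amdgpu_vm_pt_* helpers.
 */
#include <stddef.h>

struct node {
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

/* dive to the deepest leftmost entry, like amdgpu_vm_pt_first_dfs() */
static struct node *deepest_first(struct node *n)
{
	while (n->first_child)
		n = n->first_child;
	return n;
}

/* post-order step: every child is visited before its parent */
static struct node *dfs_next(struct node *n)
{
	if (n->next_sibling)
		return deepest_first(n->next_sibling);
	return n->parent;	/* NULL once we step past the root */
}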
*/ static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, - struct amdgpu_vm_pt *entry) + struct amdgpu_vm_bo_base *entry) { return entry && (!start || entry != start->entry); } @@ -561,9 +630,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct amdgpu_bo_list_entry *entry) { entry->priority = 0; - entry->tv.bo = &vm->root.base.bo->tbo; - /* One for the VM updates, one for TTM and one for the CS job */ - entry->tv.num_shared = 3; + entry->tv.bo = &vm->root.bo->tbo; + /* Two for VM updates, one for TTM and one for the CS job */ + entry->tv.num_shared = 4; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } @@ -584,7 +653,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) if (!amdgpu_bo_is_amdgpu_bo(bo)) return; - if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) + if (bo->pin_count) return; abo = ttm_to_amdgpu_bo(bo); @@ -593,7 +662,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) vm->bulk_moveable = false; } @@ -613,27 +682,30 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; if (vm->bulk_moveable) { - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&adev->mman.bdev.lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&adev->mman.bdev.lru_lock); return; } memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&adev->mman.bdev.lru_lock); list_for_each_entry(bo_base, &vm->idle, vm_status) { struct amdgpu_bo *bo = bo_base->bo; + struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); - if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo, + ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource, + &vm->lru_bulk_move); + if (shadow) + ttm_bo_move_to_lru_tail(&shadow->tbo, + shadow->tbo.resource, &vm->lru_bulk_move); } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&adev->mman.bdev.lru_lock); vm->bulk_moveable = true; } @@ -656,29 +728,36 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { struct amdgpu_vm_bo_base *bo_base, *tmp; - int r = 0; + int r; vm->bulk_moveable &= list_empty(&vm->evicted); list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; + struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); r = validate(param, bo); if (r) - break; + return r; + if (shadow) { + r = validate(param, shadow); + if (r) + return r; + } if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); } else { - vm->update_funcs->map_table(bo); - if (bo->parent) - amdgpu_vm_bo_relocated(bo_base); - else - amdgpu_vm_bo_idle(bo_base); + vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); + amdgpu_vm_bo_relocated(bo_base); } } - return r; + amdgpu_vm_eviction_lock(vm); + vm->evicting = false; + amdgpu_vm_eviction_unlock(vm); + + return 0; } /** @@ -701,8 +780,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) * * @adev: amdgpu_device pointer * @vm: VM to clear BO from - * @bo: BO to clear - * @direct: use a direct update + * @vmbo: BO to clear + * @immediate: use an immediate update * * Root PD needs to be reserved when calling this. 
* @@ -711,16 +790,17 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, - bool direct) + struct amdgpu_bo_vm *vmbo, + bool immediate) { struct ttm_operation_ctx ctx = { true, false }; unsigned level = adev->vm_manager.root_level; struct amdgpu_vm_update_params params; - struct amdgpu_bo *ancestor = bo; + struct amdgpu_bo *ancestor = &vmbo->bo; + struct amdgpu_bo *bo = &vmbo->bo; unsigned entries, ats_entries; uint64_t addr; - int r; + int r, idx; /* Figure out our place in the hierarchy */ if (ancestor->parent) { @@ -741,11 +821,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, entries -= ats_entries; } else { - struct amdgpu_vm_pt *pt; + struct amdgpu_vm_bo_base *pt; - pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base); + pt = ancestor->vm_bo; ats_entries = amdgpu_vm_num_ats_entries(adev); - if ((pt - vm->root.entries) >= ats_entries) { + if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { ats_entries = 0; } else { ats_entries = entries; @@ -757,25 +837,29 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) return r; - if (bo->shadow) { - r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement, - &ctx); + if (vmbo->shadow) { + struct amdgpu_bo *shadow = vmbo->shadow; + + r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx); if (r) return r; } - r = vm->update_funcs->map_table(bo); + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; + + r = vm->update_funcs->map_table(vmbo); if (r) - return r; + goto exit; memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; - params.direct = direct; + params.immediate = immediate; - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL); + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; addr = 0; if (ats_entries) { @@ -788,10 +872,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, amdgpu_gmc_get_vm_pde(adev, level, &value, &flags); } - r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries, + r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries, value, flags); if (r) - return r; + goto exit; addr += ats_entries * 8; } @@ -811,44 +895,98 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, } } - r = vm->update_funcs->update(&params, bo, addr, 0, entries, + r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, value, flags); if (r) - return r; + goto exit; } - return vm->update_funcs->commit(&params, NULL); + r = vm->update_funcs->commit(&params, NULL); +exit: + drm_dev_exit(idx); + return r; } /** - * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation + * amdgpu_vm_pt_create - create bo for PD/PT * * @adev: amdgpu_device pointer * @vm: requesting vm * @level: the page table level - * @direct: use a direct update - * @bp: resulting BO allocation parameters + * @immediate: use an immediate update + * @vmbo: pointer to the buffer object pointer */ -static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, bool direct, - struct amdgpu_bo_param *bp) +static int amdgpu_vm_pt_create(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + int level, bool immediate, + struct amdgpu_bo_vm **vmbo) { - memset(bp, 0, sizeof(*bp)); + struct amdgpu_bo_param bp; + struct amdgpu_bo *bo; + struct dma_resv *resv; + unsigned int num_entries; + int r; + + memset(&bp, 0, sizeof(bp)); - bp->size = amdgpu_vm_bo_size(adev, level); - bp->byte_align =
AMDGPU_GPU_PAGE_SIZE; - bp->domain = AMDGPU_GEM_DOMAIN_VRAM; - bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); - bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + bp.size = amdgpu_vm_bo_size(adev, level); + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain); + bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_CPU_GTT_USWC; + + if (level < AMDGPU_VM_PTB) + num_entries = amdgpu_vm_num_entries(adev, level); + else + num_entries = 0; + + bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries); + if (vm->use_cpu_for_update) - bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - else if (!vm->root.base.bo || vm->root.base.bo->shadow) - bp->flags |= AMDGPU_GEM_CREATE_SHADOW; - bp->type = ttm_bo_type_kernel; - bp->no_wait_gpu = direct; - if (vm->root.base.bo) - bp->resv = vm->root.base.bo->tbo.base.resv; + bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + + bp.type = ttm_bo_type_kernel; + bp.no_wait_gpu = immediate; + if (vm->root.bo) + bp.resv = vm->root.bo->tbo.base.resv; + + r = amdgpu_bo_create_vm(adev, &bp, vmbo); + if (r) + return r; + + bo = &(*vmbo)->bo; + if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { + (*vmbo)->shadow = NULL; + return 0; + } + + if (!bp.resv) + WARN_ON(dma_resv_lock(bo->tbo.base.resv, + NULL)); + resv = bp.resv; + memset(&bp, 0, sizeof(bp)); + bp.size = amdgpu_vm_bo_size(adev, level); + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; + bp.type = ttm_bo_type_kernel; + bp.resv = bo->tbo.base.resv; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); + + if (!resv) + dma_resv_unlock(bo->tbo.base.resv); + + if (r) { + amdgpu_bo_unref(&bo); + return r; + } + + (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); + amdgpu_bo_add_to_shadow_list(*vmbo); + + return 0; } /** @@ -857,7 +995,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, * @adev: amdgpu_device pointer * @vm: VM to allocate page tables for * @cursor: Which page table to allocate - * @direct: use a direct update + * @immediate: use an immediate update * * Make sure a specific page table or directory is allocated. * @@ -868,40 +1006,27 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_pt_cursor *cursor, - bool direct) + bool immediate) { - struct amdgpu_vm_pt *entry = cursor->entry; - struct amdgpu_bo_param bp; - struct amdgpu_bo *pt; + struct amdgpu_vm_bo_base *entry = cursor->entry; + struct amdgpu_bo *pt_bo; + struct amdgpu_bo_vm *pt; int r; - if (cursor->level < AMDGPU_VM_PTB && !entry->entries) { - unsigned num_entries; - - num_entries = amdgpu_vm_num_entries(adev, cursor->level); - entry->entries = kvmalloc_array(num_entries, - sizeof(*entry->entries), - GFP_KERNEL | __GFP_ZERO); - if (!entry->entries) - return -ENOMEM; - } - - if (entry->base.bo) + if (entry->bo) return 0; - amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); - - r = amdgpu_bo_create(adev, &bp, &pt); + r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); if (r) return r; /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. 
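
/*
 * Editor's sketch, not part of the patch: amdgpu_vm_pt_create() above sizes
 * the amdgpu_bo_vm allocation with struct_size((*vmbo), entries, num_entries),
 * embedding the per-directory entry array in the object itself instead of the
 * separately kvmalloc'ed amdgpu_vm_pt::entries it replaces. The same
 * flexible-array idiom in plain C, with a simplified struct_size() that,
 * unlike the kernel macro, omits the overflow checking; types are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct bo_base { void *bo; int moved; };

struct bo_vm {
    long long size;
    struct bo_base entries[];         /* one per directory entry */
};

#define STRUCT_SIZE(ptr, member, n) \
    (sizeof(*(ptr)) + sizeof(*(ptr)->member) * (size_t)(n))

int main(void)
{
    struct bo_vm *vmbo = NULL;
    size_t num_entries = 512;         /* e.g. a 9-bit directory level */

    vmbo = calloc(1, STRUCT_SIZE(vmbo, entries, num_entries));
    if (!vmbo)
        return 1;

    vmbo->entries[num_entries - 1].moved = 1;
    printf("allocation size: %zu bytes\n",
           STRUCT_SIZE(vmbo, entries, num_entries));
    free(vmbo);
    return 0;
}
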
*/ - pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); - amdgpu_vm_bo_base_init(&entry->base, vm, pt); - - r = amdgpu_vm_clear_bo(adev, vm, pt, direct); + pt_bo = &pt->bo; + pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo); + amdgpu_vm_bo_base_init(entry, vm, pt_bo); + r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); if (r) goto error_free_pt; @@ -909,7 +1034,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, error_free_pt: amdgpu_bo_unref(&pt->shadow); - amdgpu_bo_unref(&pt); + amdgpu_bo_unref(&pt_bo); return r; } @@ -918,16 +1043,17 @@ error_free_pt: * * @entry: PDE to free */ -static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) +static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry) { - if (entry->base.bo) { - entry->base.bo->vm_bo = NULL; - list_del(&entry->base.vm_status); - amdgpu_bo_unref(&entry->base.bo->shadow); - amdgpu_bo_unref(&entry->base.bo); - } - kvfree(entry->entries); - entry->entries = NULL; + struct amdgpu_bo *shadow; + + if (!entry->bo) + return; + shadow = amdgpu_bo_shadowed(entry->bo); + entry->bo->vm_bo = NULL; + list_del(&entry->vm_status); + amdgpu_bo_unref(&shadow); + amdgpu_bo_unref(&entry->bo); } /** @@ -944,7 +1070,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *start) { struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; vm->bulk_moveable = false; @@ -1056,8 +1182,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence *fence = NULL; bool pasid_mapping_needed = false; unsigned patch_offset = 0; + bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); int r; + if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); + if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; @@ -1094,7 +1224,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); if (vm_flush_needed || pasid_mapping_needed) { - r = amdgpu_fence_emit(ring, &fence, 0); + r = amdgpu_fence_emit(ring, &fence, NULL, 0); if (r) return r; } @@ -1208,10 +1338,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) */ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, struct amdgpu_vm *vm, - struct amdgpu_vm_pt *entry) + struct amdgpu_vm_bo_base *entry) { - struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); - struct amdgpu_bo *bo = parent->base.bo, *pbo; + struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); + struct amdgpu_bo *bo = parent->bo, *pbo; uint64_t pde, pt, flags; unsigned level; @@ -1219,9 +1349,10 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, pbo = pbo->parent; level += params->adev->vm_manager.root_level; - amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); - pde = (entry - parent->entries) * 8; - return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); + amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags); + pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8; + return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, + 1, 0, flags); } /** @@ -1236,11 +1367,11 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; for_each_amdgpu_vm_pt_dfs_safe(adev, 
vm, NULL, cursor, entry) - if (entry->base.bo && !entry->base.moved) - amdgpu_vm_bo_relocated(&entry->base); + if (entry->bo && !entry->moved) + amdgpu_vm_bo_relocated(entry); } /** @@ -1248,7 +1379,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm - * @direct: submit directly to the paging queue + * @immediate: submit immediately to the paging queue * * Makes sure all directories are up to date. * * @@ -1256,29 +1387,33 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, * 0 for success, error for failure. */ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, - struct amdgpu_vm *vm, bool direct) + struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - int r; + int r, idx; if (list_empty(&vm->relocated)) return 0; + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; + memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; - params.direct = direct; + params.immediate = immediate; - r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL); + r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; while (!list_empty(&vm->relocated)) { - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; - entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, - base.vm_status); - amdgpu_vm_bo_idle(&entry->base); + entry = list_first_entry(&vm->relocated, + struct amdgpu_vm_bo_base, + vm_status); + amdgpu_vm_bo_idle(entry); r = amdgpu_vm_update_pde(&params, vm, entry); if (r) @@ -1288,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, r = vm->update_funcs->commit(&params, &vm->last_update); if (r) goto error; + drm_dev_exit(idx); return 0; error: amdgpu_vm_invalidate_pds(adev, vm); +exit: + drm_dev_exit(idx); return r; } @@ -1301,9 +1439,9 @@ error: * Make sure to set the right flags for the PTEs at the desired level.
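
/*
 * Editor's sketch, not part of the patch: amdgpu_vm_update_pde() above
 * derives the byte offset of a PDE from pointer arithmetic, subtracting the
 * parent's entries[] base from the child's entry pointer to get the
 * directory index, then multiplying by the 8-byte PDE size. A standalone
 * model with hypothetical types:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pt_entry { void *bo; };

int main(void)
{
    struct pt_entry entries[512];               /* one directory level */
    struct pt_entry *entry = &entries[37];      /* some child PT */

    ptrdiff_t index = entry - entries;          /* element, not byte, diff */
    uint64_t pde_offset = (uint64_t)index * 8;  /* 8 bytes per PDE */

    printf("index %td -> PDE byte offset %llu\n",
           index, (unsigned long long)pde_offset);
    return 0;
}
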
*/ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params, - struct amdgpu_bo *bo, unsigned level, + struct amdgpu_bo_vm *pt, unsigned int level, uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, + unsigned int count, uint32_t incr, uint64_t flags) { @@ -1319,7 +1457,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params, flags |= AMDGPU_PTE_EXECUTABLE; } - params->vm->update_funcs->update(params, bo, pe, addr, count, incr, + params->vm->update_funcs->update(params, pt, pe, addr, count, incr, flags); } @@ -1418,27 +1556,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - /* make sure that the page tables covering the address range are - * actually allocated - */ - r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, - params->direct); - if (r) - return r; - - pt = cursor.entry->base.bo; - - /* The root level can't be a huge page */ - if (cursor.level == adev->vm_manager.root_level) { - if (!amdgpu_vm_pt_descendant(adev, &cursor)) - return -ENOENT; - continue; + if (!params->unlocked) { + /* make sure that the page tables covering the + * address range are actually allocated + */ + r = amdgpu_vm_alloc_pts(params->adev, params->vm, + &cursor, params->immediate); + if (r) + return r; } shift = amdgpu_vm_level_shift(adev, cursor.level); parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); - if (adev->asic_type < CHIP_VEGA10 && - (flags & AMDGPU_PTE_VALID)) { + if (params->unlocked) { + /* Unlocked updates are only allowed on the leaves */ + if (amdgpu_vm_pt_descendant(adev, &cursor)) + continue; + } else if (adev->asic_type < CHIP_VEGA10 && + (flags & AMDGPU_PTE_VALID)) { /* No huge page support before GMC v9 */ if (cursor.level != AMDGPU_VM_PTB) { if (!amdgpu_vm_pt_descendant(adev, &cursor)) @@ -1450,38 +1585,64 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * smaller than the address shift. Go to the next * child entry and try again. */ - if (!amdgpu_vm_pt_descendant(adev, &cursor)) - return -ENOENT; - continue; - } else if (frag >= parent_shift && - cursor.level - 1 != adev->vm_manager.root_level) { + if (amdgpu_vm_pt_descendant(adev, &cursor)) + continue; + } else if (frag >= parent_shift) { /* If the fragment size is even larger than the parent - * shift we should go up one level and check it again - * unless one level up is the root level. + * shift we should go up one level and check it again. */ if (!amdgpu_vm_pt_ancestor(&cursor)) - return -ENOENT; + return -EINVAL; continue; } + pt = cursor.entry->bo; + if (!pt) { + /* We need all PDs and PTs for mapping something, */ + if (flags & AMDGPU_PTE_VALID) + return -ENOENT; + + /* but unmapping something can happen at a higher + * level. 
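
/*
 * Editor's sketch, not part of the patch: the walker above relies on
 * amdgpu_vm_level_shift() to size each radix level. Assuming the common
 * 9-bits-per-level layout over 4 KiB GPU pages (an assumption; real ASICs
 * configure block sizes differently), the per-level index of a virtual
 * address works out as follows:
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12u          /* 4 KiB GPU page */
#define LEVEL_BITS 9u           /* 512 entries per PD/PT */
#define NUM_LEVELS 4u           /* root PDB down to the PTB */

static unsigned level_shift(unsigned level)
{
    /* level 0 is the root, NUM_LEVELS - 1 the page-table block */
    return PAGE_SHIFT + (NUM_LEVELS - 1 - level) * LEVEL_BITS;
}

int main(void)
{
    uint64_t va = 0x0000123456789000ull;

    for (unsigned level = 0; level < NUM_LEVELS; level++) {
        unsigned shift = level_shift(level);
        uint64_t idx = (va >> shift) & ((1ull << LEVEL_BITS) - 1);

        printf("level %u: shift %2u, index %llu\n",
               level, shift, (unsigned long long)idx);
    }
    return 0;
}
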
+ */ + if (!amdgpu_vm_pt_ancestor(&cursor)) + return -EINVAL; + + pt = cursor.entry->bo; + shift = parent_shift; + frag_end = max(frag_end, ALIGN(frag_start + 1, + 1ULL << shift)); + } + /* Looks good so far, calculate parameters for the update */ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; mask = amdgpu_vm_entries_mask(adev, cursor.level); pe_start = ((cursor.pfn >> shift) & mask) * 8; - entry_end = (uint64_t)(mask + 1) << shift; + entry_end = ((uint64_t)mask + 1) << shift; entry_end += cursor.pfn & ~(entry_end - 1); entry_end = min(entry_end, end); do { + struct amdgpu_vm *vm = params->vm; uint64_t upd_end = min(entry_end, frag_end); unsigned nptes = (upd_end - frag_start) >> shift; + uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag); + + /* This can happen when we set higher level PDs to + * silent to stop fault floods. + */ + nptes = max(nptes, 1u); - amdgpu_vm_update_flags(params, pt, cursor.level, - pe_start, dst, nptes, incr, - flags | AMDGPU_PTE_FRAG(frag)); + trace_amdgpu_vm_update_ptes(params, frag_start, upd_end, + nptes, dst, incr, upd_flags, + vm->task_info.pid, + vm->immediate.fence_context); + amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt), + cursor.level, pe_start, dst, + nptes, incr, upd_flags); pe_start += nptes * 8; - dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; + dst += nptes * incr; frag_start = upd_end; if (frag_start >= frag_end) { @@ -1501,7 +1662,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * completely covered by the range and so potentially still in use. */ while (cursor.pfn < frag_start) { - amdgpu_vm_free_pts(adev, params->vm, &cursor); + /* Make sure previous mapping is freed */ + if (cursor.entry->bo) { + params->table_freed = true; + amdgpu_vm_free_pts(adev, params->vm, &cursor); + } amdgpu_vm_pt_next(adev, &cursor); } @@ -1517,175 +1682,193 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * - * @adev: amdgpu_device pointer + * @adev: amdgpu_device pointer of the VM + * @bo_adev: amdgpu_device pointer of the mapped BO * @vm: requested vm - * @direct: direct submission in a page fault - * @exclusive: fence we need to sync to + * @immediate: immediate submission in a page fault + * @unlocked: unlocked invalidation during MM callback + * @resv: fences we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries - * @addr: addr to set the area to + * @offset: offset into nodes and pages_addr + * @res: ttm_resource to map * @pages_addr: DMA addresses to use for mapping * @fence: optional resulting fence + * @table_freed: return true if page table is freed * * Fill in the page table entries between @start and @last. * * Returns: * 0 for success, -EINVAL for failure. 
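
/*
 * Editor's sketch, not part of the patch: the inner loop above clips each
 * update to whichever boundary comes first, the end of the current directory
 * entry (entry_end) or of the current fragment (frag_end), and advances dst
 * by nptes * incr. A simplified model of that clipping over a pfn range:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t start = 10, end = 100;     /* pfns to update */
    uint64_t entry_size = 32;           /* pfns covered per PT entry run */
    uint64_t frag_size = 16;            /* pfns per hw fragment */
    uint64_t pfn = start;

    while (pfn < end) {
        uint64_t entry_end = (pfn / entry_size + 1) * entry_size;
        uint64_t frag_end = (pfn / frag_size + 1) * frag_size;
        uint64_t upd_end = entry_end < frag_end ? entry_end : frag_end;

        if (upd_end > end)
            upd_end = end;
        printf("update [%llu, %llu): %llu ptes\n",
               (unsigned long long)pfn, (unsigned long long)upd_end,
               (unsigned long long)(upd_end - pfn));
        pfn = upd_end;
    }
    return 0;
}
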
*/ -static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, - struct amdgpu_vm *vm, bool direct, - struct dma_fence *exclusive, - uint64_t start, uint64_t last, - uint64_t flags, uint64_t addr, - dma_addr_t *pages_addr, - struct dma_fence **fence) +int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_device *bo_adev, + struct amdgpu_vm *vm, bool immediate, + bool unlocked, struct dma_resv *resv, + uint64_t start, uint64_t last, + uint64_t flags, uint64_t offset, + struct ttm_resource *res, + dma_addr_t *pages_addr, + struct dma_fence **fence, + bool *table_freed) { struct amdgpu_vm_update_params params; - void *owner = AMDGPU_FENCE_OWNER_VM; - int r; + struct amdgpu_res_cursor cursor; + enum amdgpu_sync_mode sync_mode; + int r, idx; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; - params.direct = direct; + params.immediate = immediate; params.pages_addr = pages_addr; + params.unlocked = unlocked; - /* sync to everything except eviction fences on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_KFD; - - r = vm->update_funcs->prepare(&params, owner, exclusive); - if (r) - return r; - - r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags); - if (r) - return r; - - return vm->update_funcs->commit(&params, fence); -} - -/** - * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks - * - * @adev: amdgpu_device pointer - * @exclusive: fence we need to sync to - * @pages_addr: DMA addresses to use for mapping - * @vm: requested vm - * @mapping: mapped range and flags to use for the update - * @flags: HW flags for the mapping - * @bo_adev: amdgpu_device pointer that bo actually been allocated - * @nodes: array of drm_mm_nodes with the MC addresses - * @fence: optional resulting fence - * - * Split the mapping into smaller chunks so that each update fits - * into a SDMA IB. - * - * Returns: - * 0 for success, -EINVAL for failure. - */ -static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, - struct dma_fence *exclusive, - dma_addr_t *pages_addr, - struct amdgpu_vm *vm, - struct amdgpu_bo_va_mapping *mapping, - uint64_t flags, - struct amdgpu_device *bo_adev, - struct drm_mm_node *nodes, - struct dma_fence **fence) -{ - unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size; - uint64_t pfn, start = mapping->start; - int r; - - /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here - * but in case of something, we filter the flags in first place + /* Implicitly sync to command submissions in the same VM before + * unmapping. Sync to moving fences before mapping.
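
/*
 * Editor's sketch, not part of the patch: the mapping loop that continues in
 * the hunk below scans pages_addr[] to find the longest run that is either
 * fully contiguous (mapped with one linear PTE stride) or fully scattered
 * (handed to the per-page path). A user-space model of that run detection;
 * names are hypothetical:
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

/* Length of the leading run sharing the contiguity of its first step. */
static uint64_t run_length(const uint64_t *pages_addr, uint64_t n, int *contig)
{
    uint64_t count;

    if (n < 2) {
        *contig = 1;
        return n;
    }
    *contig = pages_addr[1] == pages_addr[0] + PAGE_SIZE;
    for (count = 2; count < n; count++) {
        int step = pages_addr[count] == pages_addr[count - 1] + PAGE_SIZE;

        if (step != *contig)
            break;
    }
    return count;
}

int main(void)
{
    uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0x5000 };
    uint64_t done = 0, n = sizeof(pages) / sizeof(pages[0]);

    while (done < n) {
        int contig;
        uint64_t len = run_length(pages + done, n - done, &contig);

        printf("%llu pages at index %llu: %s\n",
               (unsigned long long)len, (unsigned long long)done,
               contig ? "contiguous" : "scattered");
        done += len;
    }
    return 0;
}
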
*/ - if (!(mapping->flags & AMDGPU_PTE_READABLE)) - flags &= ~AMDGPU_PTE_READABLE; - if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) - flags &= ~AMDGPU_PTE_WRITEABLE; + if (!(flags & AMDGPU_PTE_VALID)) + sync_mode = AMDGPU_SYNC_EQ_OWNER; + else + sync_mode = AMDGPU_SYNC_EXPLICIT; - /* Apply ASIC specific mapping flags */ - amdgpu_gmc_get_vm_pte(adev, mapping, &flags); + amdgpu_vm_eviction_lock(vm); + if (vm->evicting) { + r = -EBUSY; + goto error_unlock; + } - trace_amdgpu_vm_bo_update(mapping); + if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { + struct dma_fence *tmp = dma_fence_get_stub(); - pfn = mapping->offset >> PAGE_SHIFT; - if (nodes) { - while (pfn >= nodes->size) { - pfn -= nodes->size; - ++nodes; - } + amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); + swap(vm->last_unlocked, tmp); + dma_fence_put(tmp); } - do { - dma_addr_t *dma_addr = NULL; - uint64_t max_entries; - uint64_t addr, last; + r = vm->update_funcs->prepare(&params, resv, sync_mode); + if (r) + goto error_unlock; - if (nodes) { - addr = nodes->start << PAGE_SHIFT; - max_entries = (nodes->size - pfn) * - AMDGPU_GPU_PAGES_IN_CPU_PAGE; - } else { - addr = 0; - max_entries = S64_MAX; - } + amdgpu_res_first(pages_addr ? NULL : res, offset, + (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor); + while (cursor.remaining) { + uint64_t tmp, num_entries, addr; + num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT; if (pages_addr) { - uint64_t count; + bool contiguous = true; - for (count = 1; - count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - ++count) { - uint64_t idx = pfn + count; + if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) { + uint64_t pfn = cursor.start >> PAGE_SHIFT; + uint64_t count; - if (pages_addr[idx] != - (pages_addr[idx - 1] + PAGE_SIZE)) - break; + contiguous = pages_addr[pfn + 1] == + pages_addr[pfn] + PAGE_SIZE; + + tmp = num_entries / + AMDGPU_GPU_PAGES_IN_CPU_PAGE; + for (count = 2; count < tmp; ++count) { + uint64_t idx = pfn + count; + + if (contiguous != (pages_addr[idx] == + pages_addr[idx - 1] + PAGE_SIZE)) + break; + } + num_entries = count * + AMDGPU_GPU_PAGES_IN_CPU_PAGE; } - if (count < min_linear_pages) { - addr = pfn << PAGE_SHIFT; - dma_addr = pages_addr; + if (!contiguous) { + addr = cursor.start; + params.pages_addr = pages_addr; } else { - addr = pages_addr[pfn]; - max_entries = count * - AMDGPU_GPU_PAGES_IN_CPU_PAGE; + addr = pages_addr[cursor.start >> PAGE_SHIFT]; + params.pages_addr = NULL; } - } else if (flags & AMDGPU_PTE_VALID) { - addr += bo_adev->vm_manager.vram_base_offset; - addr += pfn << PAGE_SHIFT; + } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { + addr = bo_adev->vm_manager.vram_base_offset + + cursor.start; + } else { + addr = 0; } - last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, - start, last, flags, addr, - dma_addr, fence); + tmp = start + num_entries; + r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags); if (r) - return r; + goto error_unlock; - pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - if (nodes && nodes->size == pfn) { - pfn = 0; - ++nodes; - } - start = last + 1; + amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE); + start = tmp; + } - } while (unlikely(start != mapping->last + 1)); + r = vm->update_funcs->commit(&params, fence); - return 0; + if (table_freed) + *table_freed = *table_freed || params.table_freed; + +error_unlock: + amdgpu_vm_eviction_unlock(vm); + drm_dev_exit(idx); + return r; } +void amdgpu_vm_get_memory(struct
amdgpu_vm *vm, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem) +{ + struct amdgpu_bo_va *bo_va, *tmp; + + list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + spin_lock(&vm->invalidated_lock); + list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + spin_unlock(&vm->invalidated_lock); +} /** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries + * @table_freed: return true if page table is freed * * Fill in the page table entries for @bo_va. * @@ -1693,43 +1876,53 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * 0 for success, -EINVAL for failure. */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear) + bool clear, bool *table_freed) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - struct ttm_mem_reg *mem; - struct drm_mm_node *nodes; - struct dma_fence *exclusive, **last_update; + struct ttm_resource *mem; + struct dma_fence **last_update; + struct dma_resv *resv; uint64_t flags; struct amdgpu_device *bo_adev = adev; int r; if (clear || !bo) { mem = NULL; - nodes = NULL; - exclusive = NULL; + resv = vm->root.bo->tbo.base.resv; } else { - struct ttm_dma_tt *ttm; + struct drm_gem_object *obj = &bo->tbo.base; + + resv = bo->tbo.base.resv; + if (obj->import_attach && bo_va->is_xgmi) { + struct dma_buf *dma_buf = obj->import_attach->dmabuf; + struct drm_gem_object *gobj = dma_buf->priv; + struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - mem = &bo->tbo.mem; - nodes = mem->mm_node; - if (mem->mem_type == TTM_PL_TT) { - ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); - pages_addr = ttm->dma_address; + if (abo->tbo.resource->mem_type == TTM_PL_VRAM) + bo = gem_to_amdgpu_bo(gobj); } - exclusive = bo->tbo.moving; + mem = bo->tbo.resource; + if (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT) + pages_addr = bo->tbo.ttm->dma_address; } if (bo) { flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); + + if (amdgpu_bo_encrypted(bo)) + flags |= AMDGPU_PTE_TMZ; + bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); } else { flags = 0x0; } - if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)) + if (clear || (bo && bo->tbo.base.resv == + vm->root.bo->tbo.base.resv)) last_update = &vm->last_update; else last_update = &bo_va->last_pt_update; @@ -1743,9 +1936,26 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct 
amdgpu_bo_va *bo_va, } list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, - mapping, flags, bo_adev, nodes, - last_update); + uint64_t update_flags = flags; + + /* normally, bo_va->flags only contains READABLE and WRITEABLE bits, + * but just in case we filter the flags here first + */ + if (!(mapping->flags & AMDGPU_PTE_READABLE)) + update_flags &= ~AMDGPU_PTE_READABLE; + if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) + update_flags &= ~AMDGPU_PTE_WRITEABLE; + + /* Apply ASIC specific mapping flags */ + amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags); + + trace_amdgpu_vm_bo_update(mapping); + + r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, + resv, mapping->start, + mapping->last, update_flags, + mapping->offset, mem, + pages_addr, last_update, table_freed); if (r) return r; } @@ -1754,8 +1964,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, * the evicted list so that it gets validated again on the * next command submission. */ - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { - uint32_t mem_type = bo->tbo.mem.mem_type; + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { + uint32_t mem_type = bo->tbo.resource->mem_type; if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) @@ -1891,18 +2101,17 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, */ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; struct dma_fence *excl, **shared; unsigned i, shared_count; int r; - r = dma_resv_get_fences_rcu(resv, &excl, - &shared_count, &shared); + r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); if (r) { /* Not enough memory to grab the fence list, as last resort * block for all the fences to complete.
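
/*
 * Editor's sketch, not part of the patch: the hunk above folds the old
 * split-mapping flag filtering into amdgpu_vm_bo_update(); the per-mapping
 * READABLE/WRITEABLE bits may only ever clear bits from the base flags,
 * never add them. The masking pattern in isolation (bit positions follow
 * amdgpu_vm.h, but treat them as illustrative here):
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_READABLE  (1ull << 5)
#define PTE_WRITEABLE (1ull << 6)

static uint64_t filter_flags(uint64_t base, uint64_t mapping_flags)
{
    uint64_t update_flags = base;

    if (!(mapping_flags & PTE_READABLE))
        update_flags &= ~PTE_READABLE;
    if (!(mapping_flags & PTE_WRITEABLE))
        update_flags &= ~PTE_WRITEABLE;
    return update_flags;
}

int main(void)
{
    uint64_t base = PTE_READABLE | PTE_WRITEABLE;

    /* a read-only mapping drops the write bit, keeps the read bit */
    printf("%#llx\n",
           (unsigned long long)filter_flags(base, PTE_READABLE));
    return 0;
}
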
*/ - dma_resv_wait_timeout_rcu(resv, true, false, + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); return; } @@ -1938,6 +2147,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { + struct dma_resv *resv = vm->root.bo->tbo.base.resv; struct amdgpu_bo_va_mapping *mapping; uint64_t init_pte_value = 0; struct dma_fence *f = NULL; @@ -1952,9 +2162,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, - mapping->start, mapping->last, - init_pte_value, 0, NULL, &f); + r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, + resv, mapping->start, + mapping->last, init_pte_value, + 0, NULL, NULL, &f, NULL); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -1996,7 +2207,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to be cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; } @@ -2015,7 +2226,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear); + r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); if (r) return r; @@ -2059,14 +2270,13 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && - (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) { + if (!bo) + return bo_va; + + if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) { bo_va->is_xgmi = true; - mutex_lock(&adev->vm_manager.lock_pstate); /* Power up XGMI if it can be potentially used */ - if (++adev->vm_manager.xgmi_map_counter == 1) - amdgpu_xgmi_set_pstate(adev, 1); - mutex_unlock(&adev->vm_manager.lock_pstate); + amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20); } return bo_va; @@ -2074,7 +2284,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, /** - * amdgpu_vm_bo_insert_mapping - insert a new mapping + * amdgpu_vm_bo_insert_map - insert a new mapping * * @adev: amdgpu_device pointer * @bo_va: bo_va to store the address @@ -2096,7 +2306,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv && + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && !bo_va->base.moved) { list_move(&bo_va->base.vm_status, &vm->moved); } @@ -2131,14 +2341,15 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo))) + (bo && offset + size > amdgpu_bo_size(bo)) || + (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2196,14 +2407,15 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr & AMDGPU_GPU_PAGE_MASK || offset &
AMDGPU_GPU_PAGE_MASK || - size == 0 || size & AMDGPU_GPU_PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || + size == 0 || size & ~PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo))) + (bo && offset + size > amdgpu_bo_size(bo)) || + (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; /* Allocate all the needed memory */ @@ -2341,7 +2553,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->start = eaddr + 1; after->last = tmp->last; after->offset = tmp->offset; - after->offset += after->start - tmp->start; + after->offset += (after->start - tmp->start) << PAGE_SHIFT; after->flags = tmp->flags; after->bo_va = tmp->bo_va; list_add(&after->list, &tmp->bo_va->invalids); @@ -2456,7 +2668,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_vm_bo_base **base; if (bo) { - if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) vm->bulk_moveable = false; for (base = &bo_va->base.bo->vm_bo; *base; @@ -2489,17 +2701,47 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, dma_fence_put(bo_va->last_pt_update); - if (bo && bo_va->is_xgmi) { - mutex_lock(&adev->vm_manager.lock_pstate); - if (--adev->vm_manager.xgmi_map_counter == 0) - amdgpu_xgmi_set_pstate(adev, 0); - mutex_unlock(&adev->vm_manager.lock_pstate); - } + if (bo && bo_va->is_xgmi) + amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN); kfree(bo_va); } /** + * amdgpu_vm_evictable - check if we can evict a VM + * + * @bo: A page table of the VM. + * + * Check if it is possible to evict a VM. + */ +bool amdgpu_vm_evictable(struct amdgpu_bo *bo) +{ + struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; + + /* Page tables of a destroyed VM can go away immediately */ + if (!bo_base || !bo_base->vm) + return true; + + /* Don't evict VM page tables while they are busy */ + if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) + return false; + + /* Try to block ongoing updates */ + if (!amdgpu_vm_eviction_trylock(bo_base->vm)) + return false; + + /* Don't evict VM page tables while they are updated */ + if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { + amdgpu_vm_eviction_unlock(bo_base->vm); + return false; + } + + bo_base->vm->evicting = true; + amdgpu_vm_eviction_unlock(bo_base->vm); + return true; +} + +/** * amdgpu_vm_bo_invalidate - mark the bo as invalid * * @adev: amdgpu_device pointer @@ -2514,13 +2756,13 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; /* shadow bo doesn't have bo base, its validation needs its parent */ - if (bo->parent && bo->parent->shadow == bo) + if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) bo = bo->parent; for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { + if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { amdgpu_vm_bo_evicted(bo_base); continue; } @@ -2531,7 +2773,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, if (bo->tbo.type == ttm_bo_type_kernel) amdgpu_vm_bo_relocated(bo_base); - else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) amdgpu_vm_bo_moved(bo_base); else amdgpu_vm_bo_invalidated(bo_base); @@ -2661,8 +2903,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 
*/ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, + true, timeout); + if (timeout <= 0) + return timeout; + + return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); } /** @@ -2670,19 +2916,16 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) * * @adev: amdgpu_device pointer * @vm: requested vm - * @vm_context: Indicates if it GFX or Compute context - * @pasid: Process address space identifier * * Init @vm fields. * * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context, unsigned int pasid) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct amdgpu_bo_param bp; - struct amdgpu_bo *root; + struct amdgpu_bo *root_bo; + struct amdgpu_bo_vm *root; int r, i; vm->va = RB_ROOT_CACHED; @@ -2695,30 +2938,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->invalidated); spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->freed); + INIT_LIST_HEAD(&vm->done); /* create scheduler entities for page table updates */ - r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs, - adev->vm_manager.vm_pte_num_rqs, NULL); + r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, + adev->vm_manager.vm_pte_scheds, + adev->vm_manager.vm_pte_num_scheds, NULL); if (r) return r; - r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs, - adev->vm_manager.vm_pte_num_rqs, NULL); + r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, + adev->vm_manager.vm_pte_scheds, + adev->vm_manager.vm_pte_num_scheds, NULL); if (r) - goto error_free_direct; + goto error_free_immediate; vm->pte_support_ats = false; + vm->is_compute_context = false; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { - vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & - AMDGPU_VM_USE_CPU_FOR_COMPUTE); + vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & + AMDGPU_VM_USE_CPU_FOR_GFX); - if (adev->asic_type == CHIP_RAVEN) - vm->pte_support_ats = true; - } else { - vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & - AMDGPU_VM_USE_CPU_FOR_GFX); - } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && @@ -2730,60 +2970,50 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, else vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; + vm->last_unlocked = dma_fence_get_stub(); + + mutex_init(&vm->eviction_lock); + vm->evicting = false; - amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) - bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; - r = amdgpu_bo_create(adev, &bp, &root); + r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, + false, &root); if (r) goto error_free_delayed; - - r = amdgpu_bo_reserve(root, true); + root_bo = &root->bo; + r = amdgpu_bo_reserve(root_bo, true); if (r) goto error_free_root; - r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1); if (r) goto error_unreserve; - amdgpu_vm_bo_base_init(&vm->root.base, vm, root); + amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); r = amdgpu_vm_clear_bo(adev, vm, root, false); if (r) goto error_unreserve; - amdgpu_bo_unreserve(vm->root.base.bo); - - if (pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, - GFP_ATOMIC); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - if (r < 0) - goto error_free_root; - - vm->pasid = pasid; - } + amdgpu_bo_unreserve(vm->root.bo); INIT_KFIFO(vm->faults); return 0; error_unreserve: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); error_free_root: - amdgpu_bo_unref(&vm->root.base.bo->shadow); - amdgpu_bo_unref(&vm->root.base.bo); - vm->root.base.bo = NULL; + amdgpu_bo_unref(&root->shadow); + amdgpu_bo_unref(&root_bo); + vm->root.bo = NULL; error_free_delayed: + dma_fence_put(vm->last_unlocked); drm_sched_entity_destroy(&vm->delayed); -error_free_direct: - drm_sched_entity_destroy(&vm->direct); +error_free_immediate: + drm_sched_entity_destroy(&vm->immediate); return r; } @@ -2802,17 +3032,14 @@ error_free_direct: * 0 if this VM is clean */ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm) + struct amdgpu_vm *vm) { enum amdgpu_vm_level root = adev->vm_manager.root_level; unsigned int entries = amdgpu_vm_num_entries(adev, root); unsigned int i = 0; - if (!(vm->root.entries)) - return 0; - for (i = 0; i < entries; i++) { - if (vm->root.entries[i].base.bo) + if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) return -EINVAL; } @@ -2824,7 +3051,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm - * @pasid: pasid to use * * This only works on GFX VMs that don't have any BOs added and no * page tables allocated yet. @@ -2832,7 +3058,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * Changes the following VM parameters: * - use_cpu_for_update * - pte_supports_ats - * - pasid (old PASID is released, because compute manages its own PASIDs) * * Reinitializes the page directory to reflect the changed ATS * setting. @@ -2840,13 +3065,12 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * Returns: * 0 for success, -errno for errors. 
*/ -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned int pasid) +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; - r = amdgpu_bo_reserve(vm->root.base.bo, true); + r = amdgpu_bo_reserve(vm->root.bo, true); if (r) return r; @@ -2855,27 +3079,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) goto unreserve_bo; - if (pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, - GFP_ATOMIC); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - - if (r == -ENOSPC) - goto unreserve_bo; - r = 0; - } - /* Check if PD needs to be reinitialized and do it before * changing any other state, in case it fails. */ if (pte_support_ats != vm->pte_support_ats) { vm->pte_support_ats = pte_support_ats; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); + r = amdgpu_vm_clear_bo(adev, vm, + to_amdgpu_bo_vm(vm->root.bo), + false); if (r) - goto free_idr; + goto unreserve_bo; } /* Update VM state */ @@ -2887,45 +3100,28 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); - if (vm->use_cpu_for_update) + if (vm->use_cpu_for_update) { + /* Sync with last SDMA update/clear before switching to CPU */ + r = amdgpu_bo_sync_wait(vm->root.bo, + AMDGPU_FENCE_OWNER_UNDEFINED, true); + if (r) + goto unreserve_bo; + vm->update_funcs = &amdgpu_vm_cpu_funcs; - else + } else { vm->update_funcs = &amdgpu_vm_sdma_funcs; + } dma_fence_put(vm->last_update); vm->last_update = NULL; - - if (vm->pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - - /* Free the original amdgpu allocated pasid - * Will be replaced with kfd allocated pasid - */ - amdgpu_pasid_free(vm->pasid); - vm->pasid = 0; - } + vm->is_compute_context = true; /* Free the shadow bo for compute VM */ - amdgpu_bo_unref(&vm->root.base.bo->shadow); - - if (pasid) - vm->pasid = pasid; + amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); goto unreserve_bo; -free_idr: - if (pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - idr_remove(&adev->vm_manager.pasid_idr, pasid); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - } unreserve_bo: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); return r; } @@ -2939,14 +3135,8 @@ unreserve_bo: */ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - if (vm->pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - } - vm->pasid = 0; + amdgpu_vm_set_pasid(adev, vm, 0); + vm->is_compute_context = false; } /** @@ -2967,16 +3157,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); - root = amdgpu_bo_ref(vm->root.base.bo); + root = amdgpu_bo_ref(vm->root.bo); amdgpu_bo_reserve(root, true); - if (vm->pasid) { - unsigned long flags; - - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); - idr_remove(&adev->vm_manager.pasid_idr, 
vm->pasid); - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); - vm->pasid = 0; - } + amdgpu_vm_set_pasid(adev, vm, 0); + dma_fence_wait(vm->last_unlocked, false); + dma_fence_put(vm->last_unlocked); list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { @@ -2991,9 +3176,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_pts(adev, vm, NULL); amdgpu_bo_unreserve(root); amdgpu_bo_unref(&root); - WARN_ON(vm->root.base.bo); + WARN_ON(vm->root.bo); - drm_sched_entity_destroy(&vm->direct); + drm_sched_entity_destroy(&vm->immediate); drm_sched_entity_destroy(&vm->delayed); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { @@ -3024,6 +3209,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) { unsigned i; + /* Concurrent flushes are only possible starting with Vega10 and + * are broken on Navi10 and Navi14. + */ + adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || + adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14); amdgpu_vmid_mgr_init(adev); adev->vm_manager.fence_context = @@ -3050,11 +3241,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) adev->vm_manager.vm_update_mode = 0; #endif - idr_init(&adev->vm_manager.pasid_idr); - spin_lock_init(&adev->vm_manager.pasid_lock); - - adev->vm_manager.xgmi_map_counter = 0; - mutex_init(&adev->vm_manager.lock_pstate); + xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ); } /** @@ -3066,8 +3253,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) { - WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr)); - idr_destroy(&adev->vm_manager.pasid_idr); + WARN_ON(!xa_empty(&adev->vm_manager.pasids)); + xa_destroy(&adev->vm_manager.pasids); amdgpu_vmid_mgr_fini(adev); } @@ -3085,8 +3272,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_vm *args = data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; + long timeout = msecs_to_jiffies(2000); int r; switch (args->in.op) { @@ -3098,6 +3286,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: + if (amdgpu_sriov_runtime(adev)) + timeout = 8 * timeout; + + /* Wait vm idle to make sure the vmid set in SPM_VMID is + * not referenced anymore. + */ + r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); + if (r) + return r; + + r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); + if (r < 0) + return r; + + amdgpu_bo_unreserve(fpriv->vm.root.bo); amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: @@ -3114,19 +3317,19 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) * @pasid: PASID identifier for VM * @task_info: task_info to fill. 
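
/*
 * Editor's sketch, not part of the patch: the manager hunks above replace
 * the idr + spinlock pair with an xarray for the PASID -> VM mapping that
 * the task-info and fault paths below look up. A toy stand-in using a fixed
 * table and a pthread mutex in place of the irq-safe xarray lock; the real
 * PASID space is of course far larger:
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_PASID 64

struct vm { int id; };

static struct vm *pasids[MAX_PASID];
static pthread_mutex_t pasid_lock = PTHREAD_MUTEX_INITIALIZER;

static int pasid_store(unsigned pasid, struct vm *vm)
{
    if (pasid >= MAX_PASID)
        return -1;
    pthread_mutex_lock(&pasid_lock);
    pasids[pasid] = vm;
    pthread_mutex_unlock(&pasid_lock);
    return 0;
}

static struct vm *pasid_load(unsigned pasid)
{
    struct vm *vm = NULL;

    if (pasid < MAX_PASID) {
        pthread_mutex_lock(&pasid_lock);
        vm = pasids[pasid];
        pthread_mutex_unlock(&pasid_lock);
    }
    return vm;
}

int main(void)
{
    struct vm vm = { 42 };
    struct vm *found;

    pasid_store(7, &vm);
    found = pasid_load(7);
    printf("pasid 7 -> vm %d\n", found ? found->id : -1);
    pasid_store(7, NULL);               /* unbind, like xa_erase() */
    printf("pasid 7 -> %p\n", (void *)pasid_load(7));
    return 0;
}
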
*/ -void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, +void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info) { struct amdgpu_vm *vm; unsigned long flags; - spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + xa_lock_irqsave(&adev->vm_manager.pasids, flags); - vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + vm = xa_load(&adev->vm_manager.pasids, pasid); if (vm) *task_info = vm->task_info; - spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); } /** @@ -3154,58 +3357,85 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * @adev: amdgpu device pointer * @pasid: PASID of the VM * @addr: Address of the fault + * @write_fault: true is write fault, false is read fault * * Try to gracefully handle a VM fault. Return true if the fault was handled and * shouldn't be reported any more. */ -bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, - uint64_t addr) +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, + uint64_t addr, bool write_fault) { + bool is_compute_context = false; struct amdgpu_bo *root; + unsigned long irqflags; uint64_t value, flags; struct amdgpu_vm *vm; - long r; + int r; - spin_lock(&adev->vm_manager.pasid_lock); - vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - if (vm) - root = amdgpu_bo_ref(vm->root.base.bo); - else + xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); + vm = xa_load(&adev->vm_manager.pasids, pasid); + if (vm) { + root = amdgpu_bo_ref(vm->root.bo); + is_compute_context = vm->is_compute_context; + } else { root = NULL; - spin_unlock(&adev->vm_manager.pasid_lock); + } + xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); if (!root) return false; + addr /= AMDGPU_GPU_PAGE_SIZE; + + if (is_compute_context && + !svm_range_restore_pages(adev, pasid, addr, write_fault)) { + amdgpu_bo_unref(&root); + return true; + } + r = amdgpu_bo_reserve(root, true); if (r) goto error_unref; /* Double check that the VM still exists */ - spin_lock(&adev->vm_manager.pasid_lock); - vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - if (vm && vm->root.base.bo != root) + xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); + vm = xa_load(&adev->vm_manager.pasids, pasid); + if (vm && vm->root.bo != root) vm = NULL; - spin_unlock(&adev->vm_manager.pasid_lock); + xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); if (!vm) goto error_unlock; - addr /= AMDGPU_GPU_PAGE_SIZE; flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | AMDGPU_PTE_SYSTEM; - if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { + if (is_compute_context) { + /* Intentionally setting invalid PTE flag + * combination to force a no-retry-fault + */ + flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | + AMDGPU_PTE_TF; + value = 0; + } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { /* Redirect the access to the dummy page */ value = adev->dummy_page_addr; flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE; + } else { /* Let the hw retry silently on the PTE */ value = 0; } - r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, - flags, value, NULL, NULL); + r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + if (r) { + pr_debug("failed %d to reserve fence slot\n", r); + goto error_unlock; + } + + r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, + addr, flags, value, NULL, NULL, NULL, + NULL); if (r) goto error_unlock; @@ -3214,10 
+3444,106 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, error_unlock: amdgpu_bo_unreserve(root); if (r < 0) - DRM_ERROR("Can't handle page fault (%ld)\n", r); + DRM_ERROR("Can't handle page fault (%d)\n", r); error_unref: amdgpu_bo_unref(&root); return false; } + +#if defined(CONFIG_DEBUG_FS) +/** + * amdgpu_debugfs_vm_bo_info - print BO info for the VM + * + * @vm: Requested VM for printing BO info + * @m: debugfs file + * + * Print BO information in debugfs file for the VM + */ +void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) +{ + struct amdgpu_bo_va *bo_va, *tmp; + u64 total_idle = 0; + u64 total_evicted = 0; + u64 total_relocated = 0; + u64 total_moved = 0; + u64 total_invalidated = 0; + u64 total_done = 0; + unsigned int total_idle_objs = 0; + unsigned int total_evicted_objs = 0; + unsigned int total_relocated_objs = 0; + unsigned int total_moved_objs = 0; + unsigned int total_invalidated_objs = 0; + unsigned int total_done_objs = 0; + unsigned int id = 0; + + seq_puts(m, "\tIdle BOs:\n"); + list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + total_idle_objs = id; + id = 0; + + seq_puts(m, "\tEvicted BOs:\n"); + list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + total_evicted_objs = id; + id = 0; + + seq_puts(m, "\tRelocated BOs:\n"); + list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + total_relocated_objs = id; + id = 0; + + seq_puts(m, "\tMoved BOs:\n"); + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + total_moved_objs = id; + id = 0; + + seq_puts(m, "\tInvalidated BOs:\n"); + spin_lock(&vm->invalidated_lock); + list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + total_invalidated_objs = id; + id = 0; + + seq_puts(m, "\tDone BOs:\n"); + list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { + if (!bo_va->base.bo) + continue; + total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); + } + spin_unlock(&vm->invalidated_lock); + total_done_objs = id; + + seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle, + total_idle_objs); + seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted, + total_evicted_objs); + seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated, + total_relocated_objs); + seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved, + total_moved_objs); + seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated, + total_invalidated_objs); + seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done, + total_done_objs); +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4dbbe1b6b413..85fcfb8c5efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -30,6 +30,7 @@ #include <drm/gpu_scheduler.h> #include <drm/drm_file.h> #include <drm/ttm/ttm_bo_driver.h> +#include <linux/sched/mm.h> 
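
/*
 * Editor's sketch, not part of the patch: amdgpu_debugfs_vm_bo_info() above
 * walks each per-VM state list (idle, evicted, relocated, moved, invalidated,
 * done) and prints a total size and object count per state. The accounting
 * pattern in miniature, with states and sizes made up for illustration:
 */
#include <stdio.h>

enum bo_state { IDLE, EVICTED, MOVED, NUM_STATES };

static const char *const state_name[NUM_STATES] = {
    "idle", "evicted", "moved"
};

struct bo { enum bo_state state; unsigned long long size; };

int main(void)
{
    struct bo bos[] = {
        { IDLE, 4096 }, { MOVED, 65536 }, { IDLE, 8192 },
        { EVICTED, 2097152 },
    };
    unsigned long long total[NUM_STATES] = { 0 };
    unsigned objs[NUM_STATES] = { 0 };

    for (size_t i = 0; i < sizeof(bos) / sizeof(bos[0]); i++) {
        total[bos[i].state] += bos[i].size;
        objs[bos[i].state]++;
    }
    for (unsigned s = 0; s < NUM_STATES; s++)
        printf("\tTotal %s size: %12llu\tobjs:\t%u\n",
               state_name[s], total[s], objs[s]);
    return 0;
}
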
#include "amdgpu_sync.h" #include "amdgpu_ring.h" @@ -38,6 +39,7 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; +struct amdgpu_bo_vm; /* * GPUVM handling @@ -53,6 +55,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_PTE_SYSTEM (1ULL << 1) #define AMDGPU_PTE_SNOOPED (1ULL << 2) +/* RV+ */ +#define AMDGPU_PTE_TMZ (1ULL << 3) + /* VI only */ #define AMDGPU_PTE_EXECUTABLE (1ULL << 4) @@ -72,6 +77,9 @@ struct amdgpu_bo_list_entry; /* PTE is handled as PDE for VEGA10 (Translate Further) */ #define AMDGPU_PTE_TF (1ULL << 56) +/* MALL noalloc for sienna_cichlid, reserved for older ASICs */ +#define AMDGPU_PTE_NOALLOC (1ULL << 58) + /* PDE Block Fragment Size for VEGA10 */ #define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59) @@ -94,13 +102,13 @@ struct amdgpu_bo_list_entry; #define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48) #define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL) -/* How to programm VM fault handling */ +/* How to program VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 /* Reserve 4MB VRAM for page tables */ -#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20) +#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) /* max number of VMHUB */ #define AMDGPU_MAX_VMHUBS 3 @@ -108,15 +116,12 @@ struct amdgpu_bo_list_entry; #define AMDGPU_MMHUB_0 1 #define AMDGPU_MMHUB_1 2 -/* hardcode that limit for now */ -#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) +/* Reserve 2MB at top/bottom of address space for kernel use */ +#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 -#define AMDGPU_VM_CONTEXT_GFX 0 -#define AMDGPU_VM_CONTEXT_COMPUTE 1 - /* See vm_update_mode */ #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) @@ -147,13 +152,6 @@ struct amdgpu_vm_bo_base { bool moved; }; -struct amdgpu_vm_pt { - struct amdgpu_vm_bo_base base; - - /* array of page tables, one for each directory entry */ - struct amdgpu_vm_pt *entries; -}; - /* provided by hw blocks that can write ptes, e.g., sdma */ struct amdgpu_vm_pte_funcs { /* number of dw to reserve per operation */ @@ -202,9 +200,14 @@ struct amdgpu_vm_update_params { struct amdgpu_vm *vm; /** - * @direct: if changes should be made directly + * @immediate: if changes should be made immediately */ - bool direct; + bool immediate; + + /** + * @unlocked: true if the root BO is not locked + */ + bool unlocked; /** * @pages_addr: @@ -222,14 +225,19 @@ struct amdgpu_vm_update_params { * @num_dw_left: number of dw left for the IB */ unsigned int num_dw_left; + + /** + * @table_freed: return true if page table is freed when updating + */ + bool table_freed; }; struct amdgpu_vm_update_funcs { - int (*map_table)(struct amdgpu_bo *bo); - int (*prepare)(struct amdgpu_vm_update_params *p, void * owner, - struct dma_fence *exclusive); + int (*map_table)(struct amdgpu_bo_vm *bo); + int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode); int (*update)(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, + struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); int (*commit)(struct amdgpu_vm_update_params *p, struct dma_fence **fence); @@ -239,6 +247,13 @@ struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; + /* Lock to prevent eviction while we are updating page tables + * use 
vm_eviction_lock/unlock(vm) + */ + struct mutex eviction_lock; + bool evicting; + unsigned int saved_flags; + /* BOs who needs a validation */ struct list_head evicted; @@ -258,14 +273,20 @@ struct amdgpu_vm { /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; + /* BOs which are invalidated, has been updated in the PTs */ + struct list_head done; + /* contains the page directory */ - struct amdgpu_vm_pt root; + struct amdgpu_vm_bo_base root; struct dma_fence *last_update; /* Scheduler entities for page table updates */ - struct drm_sched_entity direct; + struct drm_sched_entity immediate; struct drm_sched_entity delayed; + /* Last unlocked submission to the scheduler entities */ + struct dma_fence *last_unlocked; + unsigned int pasid; /* dedicated to vm */ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS]; @@ -298,11 +319,15 @@ struct amdgpu_vm { struct ttm_lru_bulk_move lru_bulk_move; /* mark whether can do the bulk move */ bool bulk_moveable; + /* Flag to indicate if VM is used for compute */ + bool is_compute_context; }; struct amdgpu_vm_manager { /* Handling of VMIDs */ struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; + unsigned int first_kfd_vmid; + bool concurrent_flush; /* Handling of VM fences */ u64 fence_context; @@ -317,8 +342,8 @@ struct amdgpu_vm_manager { u64 vram_base_offset; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; - struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS]; - unsigned vm_pte_num_rqs; + struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS]; + unsigned vm_pte_num_scheds; struct amdgpu_ring *page_fault; /* partial resident texture handling */ @@ -334,14 +359,11 @@ struct amdgpu_vm_manager { /* PASID to VM mapping, will be used in interrupt context to * look up VM of a page fault */ - struct idr pasid_idr; - spinlock_t pasid_lock; - - /* counter of mapped memory through xgmi */ - uint32_t xgmi_map_counter; - struct mutex lock_pstate; + struct xarray pasids; }; +struct amdgpu_bo_va_mapping; + #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) @@ -352,10 +374,12 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs; void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); +int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, + u32 pasid); + long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context, unsigned int pasid); -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, @@ -367,15 +391,25 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool 
need_pipe_sync); int amdgpu_vm_update_pdes(struct amdgpu_device *adev, - struct amdgpu_vm *vm, bool direct); + struct amdgpu_vm *vm, bool immediate); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_device *bo_adev, + struct amdgpu_vm *vm, bool immediate, + bool unlocked, struct dma_resv *resv, + uint64_t start, uint64_t last, + uint64_t flags, uint64_t offset, + struct ttm_resource *res, + dma_addr_t *pages_addr, + struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear); + bool clear, bool *table_freed); +bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); @@ -411,15 +445,21 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); -void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, +void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); -bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, - uint64_t addr); +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, + uint64_t addr, bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo); +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem); + +#if defined(CONFIG_DEBUG_FS) +void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); +#endif #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 73fec7a0ced5..e3fbf0f10add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -29,49 +29,37 @@ * * @table: newly allocated or validated PD/PT */ -static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table) +static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) { - return amdgpu_bo_kmap(table, NULL); + return amdgpu_bo_kmap(&table->bo, NULL); } /** * amdgpu_vm_cpu_prepare - prepare page table update with the CPU * * @p: see amdgpu_vm_update_params definition - * @owner: owner we need to sync to - * @exclusive: exclusive move fence we need to sync to + * @resv: reservation object with embedded fence + * @sync_mode: synchronization mode * * Returns: * Negativ errno, 0 for success. */ -static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, - struct dma_fence *exclusive) +static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, + struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode) { - int r; - - /* Wait for any BO move to be completed */ - if (exclusive) { - r = dma_fence_wait(exclusive, true); - if (unlikely(r)) - return r; - } - - /* Don't wait for submissions during page fault */ - if (p->direct) + if (!resv) return 0; - /* Wait for PT BOs to be idle. PTs share the same resv. 
object - * as the root PD BO - */ - return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); + return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true); } /** * amdgpu_vm_cpu_update - helper to update page tables via CPU * * @p: see amdgpu_vm_update_params definition - * @bo: PD/PT to update - * @pe: kmap addr of the page entry + * @vmbo: PD/PT to update + * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes @@ -80,16 +68,23 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, * Write count number of PT/PD entries directly. */ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, + struct amdgpu_bo_vm *vmbo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { unsigned int i; uint64_t value; + int r; + + if (vmbo->bo.tbo.moving) { + r = dma_fence_wait(vmbo->bo.tbo.moving, true); + if (r) + return r; + } - pe += (unsigned long)amdgpu_bo_kptr(bo); + pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); for (i = 0; i < count; i++) { value = p->pages_addr ? @@ -115,7 +110,7 @@ static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p, { /* Flush HDP */ mb(); - amdgpu_asic_flush_hdp(p->adev, NULL); + amdgpu_device_flush_hdp(p->adev, NULL); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 832db59f441e..dbb551762805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -33,11 +33,11 @@ * * @table: newly allocated or validated PD/PT */ -static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) +static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) { int r; - r = amdgpu_ttm_alloc_gart(&table->tbo); + r = amdgpu_ttm_alloc_gart(&table->bo.tbo); if (r) return r; @@ -51,36 +51,31 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) * amdgpu_vm_sdma_prepare - prepare SDMA command submission * * @p: see amdgpu_vm_update_params definition - * @owner: owner we need to sync to - * @exclusive: exclusive move fence we need to sync to + * @resv: reservation object with embedded fence + * @sync_mode: synchronization mode * * Returns: * Negativ errno, 0 for success. */ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, - void *owner, struct dma_fence *exclusive) + struct dma_resv *resv, + enum amdgpu_sync_mode sync_mode) { - struct amdgpu_bo *root = p->vm->root.base.bo; + enum amdgpu_ib_pool_type pool = p->immediate ? 
AMDGPU_IB_POOL_IMMEDIATE + : AMDGPU_IB_POOL_DELAYED; unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; int r; - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job); + r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job); if (r) return r; p->num_dw_left = ndw; - /* Wait for moves to be completed */ - r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false); - if (r) - return r; - - /* Don't wait for any submissions during page fault handling */ - if (p->direct) + if (!resv) return 0; - return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, - owner, false); + return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm); } /** @@ -95,14 +90,13 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { - struct amdgpu_bo *root = p->vm->root.base.bo; struct amdgpu_ib *ib = p->job->ibs; struct drm_sched_entity *entity; struct amdgpu_ring *ring; struct dma_fence *f; int r; - entity = p->direct ? &p->vm->direct : &p->vm->delayed; + entity = p->immediate ? &p->vm->immediate : &p->vm->delayed; ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); WARN_ON(ib->length_dw == 0); @@ -112,8 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, if (r) goto error; - amdgpu_bo_fence(root, f, true); - if (fence && !p->direct) + if (p->unlocked) { + struct dma_fence *tmp = dma_fence_get(f); + + swap(p->vm->last_unlocked, f); + dma_fence_put(tmp); + } else { + amdgpu_bo_fence(p->vm->root.bo, f, true); + } + + if (fence && !p->immediate) swap(*fence, f); dma_fence_put(f); return 0; @@ -142,8 +144,8 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; - pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); + pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo)); + trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); } @@ -153,7 +155,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, * * @p: see amdgpu_vm_update_params definition * @bo: PD/PT to update - * @pe: addr of the page entry + * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes @@ -169,8 +171,8 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, { struct amdgpu_ib *ib = p->job->ibs; - pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); + pe += amdgpu_gmc_sign_extend(amdgpu_bo_gpu_offset_no_check(bo)); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, count, incr); @@ -184,8 +186,8 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, * amdgpu_vm_sdma_update - execute VM update * * @p: see amdgpu_vm_update_params definition - * @bo: PD/PT to update - * @pe: addr of the page entry + * @vmbo: PD/PT to update + * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes @@ -195,14 +197,22 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, * the IB. 
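 *
 * A hedged sketch of how this callback is reached (the locals shown are
 * assumptions for illustration, not part of this patch): callers go through
 * the per-VM vtable rather than invoking the SDMA backend directly:
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev, .vm = vm, .immediate = false,
 *	};
 *
 *	r = vm->update_funcs->prepare(&params, resv, AMDGPU_SYNC_EQ_OWNER);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, vmbo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 *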
*/ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, + struct amdgpu_bo_vm *vmbo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { + struct amdgpu_bo *bo = &vmbo->bo; + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE + : AMDGPU_IB_POOL_DELAYED; unsigned int i, ndw, nptes; uint64_t *pte; int r; + /* Wait for PD/PT moves to be completed */ + r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving); + if (r) + return r; + do { ndw = p->num_dw_left; ndw -= p->job->ibs->length_dw; @@ -219,7 +229,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW); ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job); + r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, + &p->job); if (r) return r; @@ -228,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (!p->pages_addr) { /* set page commands needed */ - if (bo->shadow) - amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr, + if (vmbo->shadow) + amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr, count, incr, flags); amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count, incr, flags); @@ -238,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, /* copy commands needed */ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw * - (bo->shadow ? 2 : 1); + (vmbo->shadow ? 2 : 1); /* for padding */ ndw -= 7; @@ -253,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, pte[i] |= flags; } - if (bo->shadow) - amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes); + if (vmbo->shadow) + amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes); amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes); pe += nptes * 8; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 82a3299e53c0..7b2b0980ec41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -22,18 +22,32 @@ * Authors: Christian König */ +#include <linux/dma-mapping.h> +#include <drm/ttm/ttm_range_manager.h> + #include "amdgpu.h" #include "amdgpu_vm.h" +#include "amdgpu_res_cursor.h" #include "amdgpu_atomfirmware.h" #include "atom.h" -struct amdgpu_vram_mgr { - struct drm_mm mm; - spinlock_t lock; - atomic64_t usage; - atomic64_t vis_usage; +struct amdgpu_vram_reservation { + struct list_head node; + struct drm_mm_node mm_node; }; +static inline struct amdgpu_vram_mgr * +to_vram_mgr(struct ttm_resource_manager *man) +{ + return container_of(man, struct amdgpu_vram_mgr, manager); +} + +static inline struct amdgpu_device * +to_amdgpu_device(struct amdgpu_vram_mgr *mgr) +{ + return container_of(mgr, struct amdgpu_device, mman.vram_mgr); +} + /** * DOC: mem_info_vram_total * @@ -46,9 +60,9 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); - return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size); + return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size); } /** @@ -63,9 +77,9 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device 
*adev = drm_to_adev(ddev); - return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size); + return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size); } /** @@ -77,13 +91,15 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev, * amount of currently used VRAM in bytes */ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; - return snprintf(buf, PAGE_SIZE, "%llu\n", - amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM])); + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man)); } /** @@ -95,45 +111,55 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, * amount of currently used visible VRAM in bytes */ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; - return snprintf(buf, PAGE_SIZE, "%llu\n", - amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM])); + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } +/** + * DOC: mem_info_vram_vendor + * + * The amdgpu driver provides a sysfs API for reporting the vendor of the + * installed VRAM + * The file mem_info_vram_vendor is used for this and returns the name of the + * vendor. 
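+ *
+ * Illustrative read from userspace (the card index is an assumption of the
+ * example):
+ *
+ *   $ cat /sys/class/drm/card0/device/mem_info_vram_vendor
+ *   samsung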
+ */ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); switch (adev->gmc.vram_vendor) { case SAMSUNG: - return snprintf(buf, PAGE_SIZE, "samsung\n"); + return sysfs_emit(buf, "samsung\n"); case INFINEON: - return snprintf(buf, PAGE_SIZE, "infineon\n"); + return sysfs_emit(buf, "infineon\n"); case ELPIDA: - return snprintf(buf, PAGE_SIZE, "elpida\n"); + return sysfs_emit(buf, "elpida\n"); case ETRON: - return snprintf(buf, PAGE_SIZE, "etron\n"); + return sysfs_emit(buf, "etron\n"); case NANYA: - return snprintf(buf, PAGE_SIZE, "nanya\n"); + return sysfs_emit(buf, "nanya\n"); case HYNIX: - return snprintf(buf, PAGE_SIZE, "hynix\n"); + return sysfs_emit(buf, "hynix\n"); case MOSEL: - return snprintf(buf, PAGE_SIZE, "mosel\n"); + return sysfs_emit(buf, "mosel\n"); case WINBOND: - return snprintf(buf, PAGE_SIZE, "winbond\n"); + return sysfs_emit(buf, "winbond\n"); case ESMT: - return snprintf(buf, PAGE_SIZE, "esmt\n"); + return sysfs_emit(buf, "esmt\n"); case MICRON: - return snprintf(buf, PAGE_SIZE, "micron\n"); + return sysfs_emit(buf, "micron\n"); default: - return snprintf(buf, PAGE_SIZE, "unknown\n"); + return sysfs_emit(buf, "unknown\n"); } } @@ -148,89 +174,23 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO, static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO, amdgpu_mem_info_vram_vendor, NULL); -/** - * amdgpu_vram_mgr_init - init VRAM manager and DRM MM - * - * @man: TTM memory type manager - * @p_size: maximum size of VRAM - * - * Allocate and initialize the VRAM manager. - */ -static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_vram_mgr *mgr; - int ret; - - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) - return -ENOMEM; - - drm_mm_init(&mgr->mm, 0, p_size); - spin_lock_init(&mgr->lock); - man->priv = mgr; - - /* Add the two VRAM-related sysfs files */ - ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_vram_total\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_vram_used\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_vram_vendor\n"); - return ret; - } - - return 0; -} - -/** - * amdgpu_vram_mgr_fini - free and destroy VRAM manager - * - * @man: TTM memory type manager - * - * Destroy and free the VRAM manager, returns -EBUSY if ranges are still - * allocated inside it. 
- */ -static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_vram_mgr *mgr = man->priv; +static struct attribute *amdgpu_vram_mgr_attributes[] = { + &dev_attr_mem_info_vram_total.attr, + &dev_attr_mem_info_vis_vram_total.attr, + &dev_attr_mem_info_vram_used.attr, + &dev_attr_mem_info_vis_vram_used.attr, + &dev_attr_mem_info_vram_vendor.attr, + NULL +}; - spin_lock(&mgr->lock); - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); - kfree(mgr); - man->priv = NULL; - device_remove_file(adev->dev, &dev_attr_mem_info_vram_total); - device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total); - device_remove_file(adev->dev, &dev_attr_mem_info_vram_used); - device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used); - device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor); - return 0; -} +const struct attribute_group amdgpu_vram_mgr_attr_group = { + .attrs = amdgpu_vram_mgr_attributes +}; /** * amdgpu_vram_mgr_vis_size - Calculate visible node size * - * @adev: amdgpu device structure + * @adev: amdgpu_device pointer * @node: MM node structure * * Calculate how many bytes of the MM node are inside visible VRAM @@ -259,33 +219,130 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_mem_reg *mem = &bo->tbo.mem; - struct drm_mm_node *nodes = mem->mm_node; - unsigned pages = mem->num_pages; + struct ttm_resource *res = bo->tbo.resource; + unsigned pages = res->num_pages; + struct drm_mm_node *mm; u64 usage; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) return amdgpu_bo_size(bo); - if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) + if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) return 0; - for (usage = 0; nodes && pages; pages -= nodes->size, nodes++) - usage += amdgpu_vram_mgr_vis_size(adev, nodes); + mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; + for (usage = 0; pages; pages -= mm->size, mm++) + usage += amdgpu_vram_mgr_vis_size(adev, mm); return usage; } +/* Commit the reservation of VRAM pages */ +static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) +{ + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct amdgpu_device *adev = to_amdgpu_device(mgr); + struct drm_mm *mm = &mgr->mm; + struct amdgpu_vram_reservation *rsv, *temp; + uint64_t vis_usage; + + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) { + if (drm_mm_reserve_node(mm, &rsv->mm_node)) + continue; + + dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n", + rsv->mm_node.start, rsv->mm_node.size); + + vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node); + atomic64_add(vis_usage, &mgr->vis_usage); + atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage); + list_move(&rsv->node, &mgr->reserved_pages); + } +} + +/** + * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM + * + * @man: TTM memory type manager + * @start: start address of the range in VRAM + * @size: size of the range + * + * Reserve memory from start addess with the specified size in VRAM + */ +int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, + uint64_t start, uint64_t size) +{ + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct amdgpu_vram_reservation *rsv; + + rsv = kzalloc(sizeof(*rsv), GFP_KERNEL); + if (!rsv) + return -ENOMEM; + + INIT_LIST_HEAD(&rsv->node); + 
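/* drm_mm bookkeeping is in whole pages; convert the byte range to page units */ + 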
rsv->mm_node.start = start >> PAGE_SHIFT; + rsv->mm_node.size = size >> PAGE_SHIFT; + + spin_lock(&mgr->lock); + list_add_tail(&rsv->node, &mgr->reservations_pending); + amdgpu_vram_mgr_do_reserve(man); + spin_unlock(&mgr->lock); + + return 0; +} + +/** + * amdgpu_vram_mgr_query_page_status - query the reservation status + * + * @man: TTM memory type manager + * @start: start address of a page in VRAM + * + * Returns: + * -EBUSY: the page is still held and on the pending list + * 0: the page has been reserved + * -ENOENT: the input page is not a reservation + */ +int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, + uint64_t start) +{ + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct amdgpu_vram_reservation *rsv; + int ret; + + spin_lock(&mgr->lock); + + list_for_each_entry(rsv, &mgr->reservations_pending, node) { + if ((rsv->mm_node.start <= start) && + (start < (rsv->mm_node.start + rsv->mm_node.size))) { + ret = -EBUSY; + goto out; + } + } + + list_for_each_entry(rsv, &mgr->reserved_pages, node) { + if ((rsv->mm_node.start <= start) && + (start < (rsv->mm_node.start + rsv->mm_node.size))) { + ret = 0; + goto out; + } + } + + ret = -ENOENT; +out: + spin_unlock(&mgr->lock); + return ret; +} + /** * amdgpu_vram_mgr_virt_start - update virtual start address * - * @mem: ttm_mem_reg to update + * @mem: ttm_resource to update * @node: just allocated node * * Calculate a virtual BO start address to easily check if everything is CPU * accessible. */ -static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, +static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, struct drm_mm_node *node) { unsigned long start; @@ -304,22 +361,22 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, * @man: TTM memory type manager * @tbo: TTM BO we need this range for * @place: placement flags and restrictions - * @mem: the resulting mem object + * @res: the resulting mem object * * Allocate VRAM for the given BO. 
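 *
 * Worked example of the splitting strategy below (sizes illustrative): the
 * allocator first tries to place the whole remaining size as a single node
 * and, when that fails due to fragmentation, halves the attempt to the next
 * power of two (never below pages_per_node, typically 2MB) before giving up
 * with -ENOSPC.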
*/ -static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource **res) { - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_vram_mgr *mgr = man->priv; + unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages; + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct amdgpu_device *adev = to_amdgpu_device(mgr); + uint64_t vis_usage = 0, mem_bytes, max_bytes; + struct ttm_range_mgr_node *node; struct drm_mm *mm = &mgr->mm; - struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; - unsigned long lpfn, num_nodes, pages_per_node, pages_left; - uint64_t vis_usage = 0, mem_bytes, max_bytes; unsigned i; int r; @@ -332,11 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, max_bytes -= AMDGPU_VM_RESERVED_VRAM; /* bail out quickly if there's likely not enough VRAM for this BO */ - mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; + mem_bytes = tbo->base.size; if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { - atomic64_sub(mem_bytes, &mgr->usage); - mem->mm_node = NULL; - return 0; + r = -ENOSPC; + goto error_sub; } if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { @@ -347,115 +403,230 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, pages_per_node = HPAGE_PMD_NR; #else /* default to 2MB */ - pages_per_node = (2UL << (20UL - PAGE_SHIFT)); + pages_per_node = 2UL << (20UL - PAGE_SHIFT); #endif - pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment); - num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); + pages_per_node = max_t(uint32_t, pages_per_node, + tbo->page_alignment); + num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node); } - nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes), - GFP_KERNEL | __GFP_ZERO); - if (!nodes) { - atomic64_sub(mem_bytes, &mgr->usage); - return -ENOMEM; + node = kvmalloc(struct_size(node, mm_nodes, num_nodes), + GFP_KERNEL | __GFP_ZERO); + if (!node) { + r = -ENOMEM; + goto error_sub; } + ttm_resource_init(tbo, place, &node->base); + mode = DRM_MM_INSERT_BEST; if (place->flags & TTM_PL_FLAG_TOPDOWN) mode = DRM_MM_INSERT_HIGH; - mem->start = 0; - pages_left = mem->num_pages; + pages_left = node->base.num_pages; - spin_lock(&mgr->lock); - for (i = 0; pages_left >= pages_per_node; ++i) { - unsigned long pages = rounddown_pow_of_two(pages_left); - - r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, - pages_per_node, 0, - place->fpfn, lpfn, - mode); - if (unlikely(r)) - break; - - vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); - amdgpu_vram_mgr_virt_start(mem, &nodes[i]); - pages_left -= pages; - } + /* Limit maximum size to 2GB due to SG table limitations */ + pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); - for (; pages_left; ++i) { - unsigned long pages = min(pages_left, pages_per_node); - uint32_t alignment = mem->page_alignment; + i = 0; + spin_lock(&mgr->lock); + while (pages_left) { + uint32_t alignment = tbo->page_alignment; - if (pages == pages_per_node) + if (pages >= pages_per_node) alignment = pages_per_node; - r = drm_mm_insert_node_in_range(mm, &nodes[i], - pages, alignment, 0, - place->fpfn, lpfn, - mode); - if (unlikely(r)) - goto error; - - vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); - amdgpu_vram_mgr_virt_start(mem, &nodes[i]); + r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, + alignment, 0, place->fpfn, + lpfn, 
mode); + if (unlikely(r)) { + if (pages > pages_per_node) { + if (is_power_of_2(pages)) + pages = pages / 2; + else + pages = rounddown_pow_of_two(pages); + continue; + } + goto error_free; + } + + vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); + amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); pages_left -= pages; + ++i; + + if (pages > pages_left) + pages = pages_left; } spin_unlock(&mgr->lock); - atomic64_add(vis_usage, &mgr->vis_usage); + if (i == 1) + node->base.placement |= TTM_PL_FLAG_CONTIGUOUS; - mem->mm_node = nodes; + if (adev->gmc.xgmi.connected_to_cpu) + node->base.bus.caching = ttm_cached; + else + node->base.bus.caching = ttm_write_combined; + atomic64_add(vis_usage, &mgr->vis_usage); + *res = &node->base; return 0; -error: +error_free: while (i--) - drm_mm_remove_node(&nodes[i]); + drm_mm_remove_node(&node->mm_nodes[i]); spin_unlock(&mgr->lock); - atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage); + kvfree(node); - kvfree(nodes); - return r == -ENOSPC ? 0 : r; +error_sub: + atomic64_sub(mem_bytes, &mgr->usage); + return r; } /** * amdgpu_vram_mgr_del - free ranges * * @man: TTM memory type manager - * @tbo: TTM BO we need this range for - * @place: placement flags and restrictions - * @mem: TTM memory object + * @res: TTM memory object * * Free the allocated VRAM again. */ -static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) +static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, + struct ttm_resource *res) { - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_vram_mgr *mgr = man->priv; - struct drm_mm_node *nodes = mem->mm_node; + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct amdgpu_device *adev = to_amdgpu_device(mgr); uint64_t usage = 0, vis_usage = 0; - unsigned pages = mem->num_pages; - - if (!mem->mm_node) - return; + unsigned i, pages; spin_lock(&mgr->lock); - while (pages) { - pages -= nodes->size; - drm_mm_remove_node(nodes); - usage += nodes->size << PAGE_SHIFT; - vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes); - ++nodes; + for (i = 0, pages = res->num_pages; pages; + pages -= node->mm_nodes[i].size, ++i) { + struct drm_mm_node *mm = &node->mm_nodes[i]; + + drm_mm_remove_node(mm); + usage += mm->size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); } + amdgpu_vram_mgr_do_reserve(man); spin_unlock(&mgr->lock); atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); - kvfree(mem->mm_node); - mem->mm_node = NULL; + kvfree(node); +} + +/** + * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table + * + * @adev: amdgpu device pointer + * @res: TTM memory object + * @offset: byte offset from the base of VRAM BO + * @length: number of bytes to export in sg_table + * @dev: the other device + * @dir: dma direction + * @sgt: resulting sg table + * + * Allocate and fill a sg table from a VRAM allocation. 
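+ *
+ * Hedged usage sketch (the locals are illustrative; in the tree this is
+ * driven from the dma-buf attachment path):
+ *
+ *	struct sg_table *sgt;
+ *	int r;
+ *
+ *	r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ *				      amdgpu_bo_size(bo), attach->dev,
+ *				      DMA_BIDIRECTIONAL, &sgt);
+ *	if (r)
+ *		return r;
+ *	(the peer device then DMAs through sgt; on detach:)
+ *	amdgpu_vram_mgr_free_sgt(attach->dev, DMA_BIDIRECTIONAL, sgt);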
+ */ +int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, + struct ttm_resource *res, + u64 offset, u64 length, + struct device *dev, + enum dma_data_direction dir, + struct sg_table **sgt) +{ + struct amdgpu_res_cursor cursor; + struct scatterlist *sg; + int num_entries = 0; + int i, r; + + *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL); + if (!*sgt) + return -ENOMEM; + + /* Determine the number of DRM_MM nodes to export */ + amdgpu_res_first(res, offset, length, &cursor); + while (cursor.remaining) { + num_entries++; + amdgpu_res_next(&cursor, cursor.size); + } + + r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL); + if (r) + goto error_free; + + /* Initialize scatterlist nodes of sg_table */ + for_each_sgtable_sg((*sgt), sg, i) + sg->length = 0; + + /* + * Walk down DRM_MM nodes to populate scatterlist nodes + * @note: Use the iterator API to get the first DRM_MM node + * and the number of bytes from it. Access the following + * DRM_MM node(s) if more of the buffer needs to be exported + */ + amdgpu_res_first(res, offset, length, &cursor); + for_each_sgtable_sg((*sgt), sg, i) { + phys_addr_t phys = cursor.start + adev->gmc.aper_base; + size_t size = cursor.size; + dma_addr_t addr; + + addr = dma_map_resource(dev, phys, size, dir, + DMA_ATTR_SKIP_CPU_SYNC); + r = dma_mapping_error(dev, addr); + if (r) + goto error_unmap; + + sg_set_page(sg, NULL, size, 0); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = size; + + amdgpu_res_next(&cursor, cursor.size); + } + + return 0; + +error_unmap: + for_each_sgtable_sg((*sgt), sg, i) { + if (!sg->length) + continue; + + dma_unmap_resource(dev, sg->dma_address, + sg->length, dir, + DMA_ATTR_SKIP_CPU_SYNC); + } + sg_free_table(*sgt); + +error_free: + kfree(*sgt); + return r; +} + +/** + * amdgpu_vram_mgr_free_sgt - free an sg table + * + * @dev: device pointer + * @dir: data direction of resource to unmap + * @sgt: sg table to free + * + * Free a previously allocated sg table. + */ +void amdgpu_vram_mgr_free_sgt(struct device *dev, + enum dma_data_direction dir, + struct sg_table *sgt) +{ + struct scatterlist *sg; + int i; + + for_each_sgtable_sg(sgt, sg, i) + dma_unmap_resource(dev, sg->dma_address, + sg->length, dir, + DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(sgt); } /** @@ -465,9 +636,9 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, * * Returns how many bytes are used in this domain. */ -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) { - struct amdgpu_vram_mgr *mgr = man->priv; + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); return atomic64_read(&mgr->usage); } @@ -479,9 +650,9 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) { - struct amdgpu_vram_mgr *mgr = man->priv; + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); return atomic64_read(&mgr->vis_usage); } @@ -494,10 +665,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
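 *
 * (Reachable through debugfs; typically the amdgpu_vram_mm file under
 * /sys/kernel/debug/dri/<minor>/, though the exact path is an assumption
 * here.)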
*/ -static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { - struct amdgpu_vram_mgr *mgr = man->priv; + struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); spin_lock(&mgr->lock); drm_mm_print(&mgr->mm, printer); @@ -508,10 +679,70 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_vram_mgr_vis_usage(man) >> 20); } -const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { - .init = amdgpu_vram_mgr_init, - .takedown = amdgpu_vram_mgr_fini, - .get_node = amdgpu_vram_mgr_new, - .put_node = amdgpu_vram_mgr_del, - .debug = amdgpu_vram_mgr_debug +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { + .alloc = amdgpu_vram_mgr_new, + .free = amdgpu_vram_mgr_del, + .debug = amdgpu_vram_mgr_debug }; + +/** + * amdgpu_vram_mgr_init - init VRAM manager and DRM MM + * + * @adev: amdgpu_device pointer + * + * Allocate and initialize the VRAM manager. + */ +int amdgpu_vram_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct ttm_resource_manager *man = &mgr->manager; + + ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); + + man->func = &amdgpu_vram_mgr_func; + + drm_mm_init(&mgr->mm, 0, man->size); + spin_lock_init(&mgr->lock); + INIT_LIST_HEAD(&mgr->reservations_pending); + INIT_LIST_HEAD(&mgr->reserved_pages); + + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_vram_mgr_fini - free and destroy VRAM manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the VRAM manager. Any remaining allocations are evicted + * first; if that eviction fails, the teardown is skipped. 
+ */ +void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + struct amdgpu_vram_reservation *rsv, *temp; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + spin_lock(&mgr->lock); + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) + kfree(rsv); + + list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { + drm_mm_remove_node(&rsv->mm_node); + kfree(rsv); + } + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 61d13d8b7b20..978ac927ac11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -24,22 +24,153 @@ #include <linux/list.h> #include "amdgpu.h" #include "amdgpu_xgmi.h" -#include "amdgpu_smu.h" #include "amdgpu_ras.h" +#include "soc15.h" #include "df/df_3_6_offset.h" +#include "xgmi/xgmi_4_0_0_smn.h" +#include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "wafl/wafl2_4_0_0_smn.h" +#include "wafl/wafl2_4_0_0_sh_mask.h" + +#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210 +#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c +#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210 static DEFINE_MUTEX(xgmi_mutex); -#define AMDGPU_MAX_XGMI_HIVE 8 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 -static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; -static unsigned hive_count = 0; - -void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) -{ - return &hive->device_list; -} +static LIST_HEAD(xgmi_hive_list); + +static const int xgmi_pcs_err_status_reg_vg20[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, +}; + +static const int wafl_pcs_err_status_reg_vg20[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const int xgmi_pcs_err_status_reg_arct[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000, +}; + +/* same as vg20*/ +static const int wafl_pcs_err_status_reg_arct[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const int xgmi23_pcs_err_status_reg_aldebaran[] = { + smnPCS_XGMI23_PCS_ERROR_STATUS, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000, + smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000 +}; + +static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = { + smnPCS_XGMI3X16_PCS_ERROR_STATUS, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000, + smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000 +}; + +static const int 
walf_pcs_err_status_reg_aldebaran[] = { + smnPCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000 +}; + +static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { + {"XGMI PCS DataLossErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI PCS TrainingErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI PCS CRCErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI PCS BERExceededErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI PCS TxMetaDataErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"XGMI PCS ReplayBufParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI PCS DataParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI PCS DeskewErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI PCS DataStartupLimitErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI PCS FCInitTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI PCS RecoveryAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + +static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { + {"WAFL PCS DataLossErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)}, + {"WAFL PCS TrainingErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)}, + {"WAFL PCS CRCErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)}, + {"WAFL PCS BERExceededErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)}, + {"WAFL PCS TxMetaDataErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"WAFL PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"WAFL PCS DataParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)}, + {"WAFL PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"WAFL PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"WAFL PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"WAFL PCS DeskewErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)}, + {"WAFL PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"WAFL PCS FCInitTimeoutErr", + 
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"WAFL PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"WAFL PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"WAFL PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"WAFL PCS RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"WAFL PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; /** * DOC: AMDGPU XGMI Support @@ -68,67 +199,55 @@ void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) * */ +static struct attribute amdgpu_xgmi_hive_id = { + .name = "xgmi_hive_id", + .mode = S_IRUGO +}; -static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct amdgpu_hive_info *hive = - container_of(attr, struct amdgpu_hive_info, dev_attr); +static struct attribute *amdgpu_xgmi_hive_attrs[] = { + &amdgpu_xgmi_hive_id, + NULL +}; - return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id); -} - -static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive) +static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj, + struct attribute *attr, char *buf) { - int ret = 0; + struct amdgpu_hive_info *hive = container_of( + kobj, struct amdgpu_hive_info, kobj); - if (WARN_ON(hive->kobj)) - return -EINVAL; + if (attr == &amdgpu_xgmi_hive_id) + return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id); - hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj); - if (!hive->kobj) { - dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n"); - return -EINVAL; - } - - hive->dev_attr = (struct device_attribute) { - .attr = { - .name = "xgmi_hive_id", - .mode = S_IRUGO, - - }, - .show = amdgpu_xgmi_show_hive_id, - }; - - ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr); - if (ret) { - dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n"); - kobject_del(hive->kobj); - kobject_put(hive->kobj); - hive->kobj = NULL; - } - - return ret; + return 0; } -static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive) +static void amdgpu_xgmi_hive_release(struct kobject *kobj) { - sysfs_remove_file(hive->kobj, &hive->dev_attr.attr); - kobject_del(hive->kobj); - kobject_put(hive->kobj); - hive->kobj = NULL; + struct amdgpu_hive_info *hive = container_of( + kobj, struct amdgpu_hive_info, kobj); + + mutex_destroy(&hive->hive_lock); + kfree(hive); } +static const struct sysfs_ops amdgpu_xgmi_hive_ops = { + .show = amdgpu_xgmi_show_attrs, +}; + +struct kobj_type amdgpu_xgmi_hive_type = { + .release = amdgpu_xgmi_hive_release, + .sysfs_ops = &amdgpu_xgmi_hive_ops, + .default_attrs = amdgpu_xgmi_hive_attrs, +}; + static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); - return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id); + return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id); } @@ -138,7 +257,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = 
ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in; uint64_t fica_out; unsigned int error_count = 0; @@ -146,18 +265,18 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200); ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208); - fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in); + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in); if (fica_out != 0x1f) pr_err("xGMI error counters not enabled!\n"); - fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in); + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in); if ((fica_out & 0xffff) == 2) error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63); - adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0); + adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0); - return snprintf(buf, PAGE_SIZE, "%d\n", error_count); + return sysfs_emit(buf, "%u\n", error_count); } @@ -184,8 +303,8 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, /* Create sysfs link to hive info folder on the first device */ - if (adev != hive->adev) { - ret = sysfs_create_link(&adev->dev->kobj, hive->kobj, + if (hive->kobj.parent != (&adev->dev->kobj)) { + ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj, "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: Failed to create link to hive info"); @@ -193,9 +312,9 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, } } - sprintf(node, "node%d", hive->number_devices); + sprintf(node, "node%d", atomic_read(&hive->number_devices)); /* Create sysfs link form the hive folder to yourself */ - ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node); + ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node); if (ret) { dev_err(adev->dev, "XGMI: Failed to create link from hive info"); goto remove_link; @@ -205,7 +324,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, remove_link: - sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique); + sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique); remove_file: device_remove_file(adev->dev, &dev_attr_xgmi_device_id); @@ -217,123 +336,157 @@ success: static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) { + char node[10]; + memset(node, 0, sizeof(node)); + device_remove_file(adev->dev, &dev_attr_xgmi_device_id); - sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique); - sysfs_remove_link(hive->kobj, adev->ddev->unique); + device_remove_file(adev->dev, &dev_attr_xgmi_error); + + if (hive->kobj.parent != (&adev->dev->kobj)) + sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info"); + + sprintf(node, "node%d", atomic_read(&hive->number_devices)); + sysfs_remove_link(&hive->kobj, node); + } -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock) +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) { - int i; - struct amdgpu_hive_info *tmp; + struct amdgpu_hive_info *hive = NULL; + int ret; if (!adev->gmc.xgmi.hive_id) return NULL; + if (adev->hive) { + kobject_get(&adev->hive->kobj); + return adev->hive; + } + mutex_lock(&xgmi_mutex); - for (i = 0 ; i < hive_count; ++i) { - tmp = &xgmi_hives[i]; - if (tmp->hive_id == adev->gmc.xgmi.hive_id) { - if (lock) - mutex_lock(&tmp->hive_lock); - mutex_unlock(&xgmi_mutex); - return tmp; - } + list_for_each_entry(hive, &xgmi_hive_list, node) { + if (hive->hive_id == adev->gmc.xgmi.hive_id) + goto pro_end; 
} - if (i >= AMDGPU_MAX_XGMI_HIVE) { - mutex_unlock(&xgmi_mutex); - return NULL; + + hive = kzalloc(sizeof(*hive), GFP_KERNEL); + if (!hive) { + dev_err(adev->dev, "XGMI: allocation failed\n"); + hive = NULL; + goto pro_end; } /* initialize a new hive if one doesn't exist yet */ - tmp = &xgmi_hives[hive_count++]; - - if (amdgpu_xgmi_sysfs_create(adev, tmp)) { - mutex_unlock(&xgmi_mutex); - return NULL; + ret = kobject_init_and_add(&hive->kobj, + &amdgpu_xgmi_hive_type, + &adev->dev->kobj, + "%s", "xgmi_hive_info"); + if (ret) { + dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kfree(hive); + hive = NULL; + goto pro_end; } - tmp->adev = adev; - tmp->hive_id = adev->gmc.xgmi.hive_id; - INIT_LIST_HEAD(&tmp->device_list); - mutex_init(&tmp->hive_lock); - mutex_init(&tmp->reset_lock); + hive->hive_id = adev->gmc.xgmi.hive_id; + INIT_LIST_HEAD(&hive->device_list); + INIT_LIST_HEAD(&hive->node); + mutex_init(&hive->hive_lock); + atomic_set(&hive->in_reset, 0); + atomic_set(&hive->number_devices, 0); + task_barrier_init(&hive->tb); + hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; + hive->hi_req_gpu = NULL; + /* + * hive pstate on boot is high in vega20 so we have to go to low + * pstate after boot. + */ + hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE; + list_add_tail(&hive->node, &xgmi_hive_list); - if (lock) - mutex_lock(&tmp->hive_lock); - tmp->pstate = -1; +pro_end: + if (hive) + kobject_get(&hive->kobj); mutex_unlock(&xgmi_mutex); + return hive; +} - return tmp; +void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive) +{ + if (hive) + kobject_put(&hive->kobj); } int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) { int ret = 0; - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); - struct amdgpu_device *tmp_adev; - bool update_hive_pstate = true; - bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20; + struct amdgpu_hive_info *hive; + struct amdgpu_device *request_adev; + bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20; + bool init_low; + hive = amdgpu_get_xgmi_hive(adev); if (!hive) return 0; + request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev; + init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN; + amdgpu_put_xgmi_hive(hive); + /* fw bug so temporarily disable pstate switching */ + return 0; + + if (!hive || adev->asic_type != CHIP_VEGA20) + return 0; + mutex_lock(&hive->hive_lock); - if (hive->pstate == pstate) { - adev->pstate = is_high_pstate ? 
pstate : adev->pstate; - goto out; - } + if (is_hi_req) + hive->hi_req_count++; + else + hive->hi_req_count--; - dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); + /* + * Vega20 only needs a single peer to request pstate high for the hive to + * go high but all peers must request pstate low for the hive to go low + */ + if (hive->pstate == pstate || + (!is_hi_req && hive->hi_req_count && !init_low)) + goto out; - if (is_support_sw_smu_xgmi(adev)) - ret = smu_set_xgmi_pstate(&adev->smu, pstate); - else if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_xgmi_pstate) - ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, - pstate); + dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate); + ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate); if (ret) { - dev_err(adev->dev, + dev_err(request_adev->dev, "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", - adev->gmc.xgmi.node_id, - adev->gmc.xgmi.hive_id, ret); + request_adev->gmc.xgmi.node_id, + request_adev->gmc.xgmi.hive_id, ret); goto out; } - /* Update device pstate */ - adev->pstate = pstate; - - /* - * Update the hive pstate only all devices of the hive - * are in the same pstate - */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - if (tmp_adev->pstate != adev->pstate) { - update_hive_pstate = false; - break; - } - } - if (update_hive_pstate || is_high_pstate) + if (init_low) + hive->pstate = hive->hi_req_count ? + hive->pstate : AMDGPU_XGMI_PSTATE_MIN; + else { hive->pstate = pstate; - + hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ? + adev : NULL; + } out: mutex_unlock(&hive->hive_lock); - return ret; } int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev) { - int ret = -EINVAL; + int ret; /* Each PSP needs to set the latest topology */ ret = psp_xgmi_set_topology_info(&adev->psp, - hive->number_devices, + atomic_read(&hive->number_devices), &adev->psp.xgmi_context.top_info); if (ret) dev_err(adev->dev, @@ -345,18 +498,63 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev } +/* + * NOTE psp_xgmi_node_info.num_hops layout is as follows: + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved) + * num_hops[5:3] = reserved + * num_hops[2:0] = number of hops + */ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev) { struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + uint8_t num_hops_mask = 0x7; + int i; + + for (i = 0 ; i < top->num_nodes; ++i) + if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) + return top->nodes[i].num_hops & num_hops_mask; + return -EINVAL; +} + +int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; int i; for (i = 0 ; i < top->num_nodes; ++i) if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) - return top->nodes[i].num_hops; + return top->nodes[i].num_links; return -EINVAL; } +/* + * Devices that support extended data require the entire hive to initialize with + * the shared memory buffer flag set. 
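+ *
+ * Hedged sketch of the two-pass sequence used below by
+ * amdgpu_xgmi_add_device (error handling omitted):
+ *
+ *	amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
+ *	psp_xgmi_get_topology_info(&adev->psp, count, top_info, true);
+ *	amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);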
+ * + * Hive locks and conditions apply - see amdgpu_xgmi_add_device + */ +static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive, + bool set_extended_data) +{ + struct amdgpu_device *tmp_adev; + int ret; + + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Failed to initialize xgmi session for data partition %i\n", + set_extended_data); + return ret; + } + + } + + return 0; +} + int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; @@ -369,7 +567,15 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (!adev->gmc.xgmi.pending_reset && + amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + ret = psp_xgmi_initialize(&adev->psp, false, true); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to initialize xgmi session\n"); + return ret; + } + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); if (ret) { dev_err(adev->dev, @@ -388,7 +594,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; } - hive = amdgpu_get_xgmi_hive(adev, 1); + hive = amdgpu_get_xgmi_hive(adev); if (!hive) { ret = -EINVAL; dev_err(adev->dev, @@ -396,9 +602,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); goto exit; } - - /* Set default device pstate */ - adev->pstate = -1; + mutex_lock(&hive->hive_lock); top_info = &adev->psp.xgmi_context.top_info; @@ -406,9 +610,12 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_for_each_entry(entry, &hive->device_list, head) top_info->nodes[count++].node_id = entry->node_id; top_info->num_nodes = count; - hive->number_devices = count; + atomic_set(&hive->number_devices, count); + + task_barrier_add_task(&hive->tb); - if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + if (!adev->gmc.xgmi.pending_reset && + amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { /* update node list for other device in the hive */ if (tmp_adev != adev) { @@ -419,63 +626,106 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } ret = amdgpu_xgmi_update_topology(hive, tmp_adev); if (ret) - goto exit; + goto exit_unlock; } /* get latest topology info for each device from psp */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, - &tmp_adev->psp.xgmi_context.top_info); + &tmp_adev->psp.xgmi_context.top_info, false); if (ret) { dev_err(tmp_adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", tmp_adev->gmc.xgmi.node_id, tmp_adev->gmc.xgmi.hive_id, ret); /* To do : continue with some node failed or disable the whole hive */ - goto exit; + goto exit_unlock; } } + + /* get topology again for hives that support extended data */ + if (adev->psp.xgmi_context.supports_extended_data) { + + /* initialize the hive to get extended data. */ + ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true); + if (ret) + goto exit_unlock; + + /* get the extended data. 
*/ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, + &tmp_adev->psp.xgmi_context.top_info, true); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.node_id, + tmp_adev->gmc.xgmi.hive_id, ret); + goto exit_unlock; + } + } + + /* initialize the hive to get non-extended data for the next round. */ + ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false); + if (ret) + goto exit_unlock; + + } } - if (!ret) + if (!ret && !adev->gmc.xgmi.pending_reset) ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); - +exit_unlock: mutex_unlock(&hive->hive_lock); exit: - if (!ret) + if (!ret) { + adev->hive = hive; dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n", adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); - else + } else { + amdgpu_put_xgmi_hive(hive); dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n", adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, ret); + } return ret; } -void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) +int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) { - struct amdgpu_hive_info *hive; + struct amdgpu_hive_info *hive = adev->hive; if (!adev->gmc.xgmi.supported) - return; + return -EINVAL; - hive = amdgpu_get_xgmi_hive(adev, 1); if (!hive) - return; + return -EINVAL; - if (!(hive->number_devices--)) { - amdgpu_xgmi_sysfs_destroy(adev, hive); - mutex_destroy(&hive->hive_lock); - mutex_destroy(&hive->reset_lock); - } else { - amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); - mutex_unlock(&hive->hive_lock); + mutex_lock(&hive->hive_lock); + task_barrier_rem_task(&hive->tb); + amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); + if (hive->hi_req_gpu == adev) + hive->hi_req_gpu = NULL; + list_del(&adev->gmc.xgmi.head); + mutex_unlock(&hive->hive_lock); + + amdgpu_put_xgmi_hive(hive); + adev->hive = NULL; + + if (atomic_dec_return(&hive->number_devices) == 0) { + /* Remove the hive from global hive list */ + mutex_lock(&xgmi_mutex); + list_del(&hive->node); + mutex_unlock(&xgmi_mutex); + + amdgpu_put_xgmi_hive(hive); } + + return psp_xgmi_terminate(&adev->psp); } -int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) +static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) { int r; struct ras_ih_if ih_info = { @@ -483,13 +733,14 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "xgmi_wafl_err_count", - .debugfs_name = "xgmi_wafl_err_inject", }; if (!adev->gmc.xgmi.supported || adev->gmc.xgmi.num_physical_nodes == 0) return 0; + adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev); + if (!adev->gmc.xgmi.ras_if) { adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); if (!adev->gmc.xgmi.ras_if) @@ -497,7 +748,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL; adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; adev->gmc.xgmi.ras_if->sub_block_index = 0; - strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl"); } ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if; r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if, @@ -510,7 +760,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) return r; } -void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) +static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) && 
adev->gmc.xgmi.ras_if) { @@ -523,3 +773,177 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) kfree(ras_if); } } + +uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; + return (addr + xgmi->physical_node_id * xgmi->node_segment_size); +} + +static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg) +{ + WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF); + WREG32_PCIE(pcs_status_reg, 0); +} + +static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) + pcs_clear_status(adev, + xgmi_pcs_err_status_reg_arct[i]); + break; + case CHIP_VEGA20: + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) + pcs_clear_status(adev, + xgmi_pcs_err_status_reg_vg20[i]); + break; + case CHIP_ALDEBARAN: + for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) + pcs_clear_status(adev, + xgmi23_pcs_err_status_reg_aldebaran[i]); + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) + pcs_clear_status(adev, + xgmi3x16_pcs_err_status_reg_aldebaran[i]); + for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) + pcs_clear_status(adev, + walf_pcs_err_status_reg_aldebaran[i]); + break; + default: + break; + } +} + +static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, + uint32_t value, + uint32_t *ue_count, + uint32_t *ce_count, + bool is_xgmi_pcs) +{ + int i; + int ue_cnt; + + if (is_xgmi_pcs) { + /* query xgmi pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { + ue_cnt = (value & + xgmi_pcs_ras_fields[i].pcs_err_mask) >> + xgmi_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + xgmi_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } else { + /* query wafl pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { + ue_cnt = (value & + wafl_pcs_ras_fields[i].pcs_err_mask) >> + wafl_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + wafl_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } + + return 0; +} + +static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + uint32_t data; + uint32_t ue_cnt = 0, ce_cnt = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + case CHIP_VEGA20: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs 
error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + case CHIP_ALDEBARAN: + /* check xgmi23 pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) { + data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check xgmi3x16 pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) { + data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) { + data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + default: + dev_warn(adev->dev, "XGMI RAS error query not supported"); + break; + } + + adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev); + + err_data->ue_count += ue_cnt; + err_data->ce_count += ce_cnt; + + return 0; +} + +const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = { + .ras_late_init = amdgpu_xgmi_ras_late_init, + .ras_fini = amdgpu_xgmi_ras_fini, + .query_ras_error_count = amdgpu_xgmi_query_ras_error_count, + .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index bbf504ff7051..d2189bf7d428 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -22,29 +22,47 @@ #ifndef __AMDGPU_XGMI_H__ #define __AMDGPU_XGMI_H__ +#include <drm/task_barrier.h> #include "amdgpu_psp.h" + struct amdgpu_hive_info { - uint64_t hive_id; - struct list_head device_list; - int number_devices; - struct mutex hive_lock, reset_lock; - struct kobject *kobj; - struct device_attribute dev_attr; - struct amdgpu_device *adev; - int pstate; /*0 -- low , 1 -- high , -1 unknown*/ + struct kobject kobj; + uint64_t hive_id; + struct list_head device_list; + struct list_head node; + atomic_t number_devices; + struct mutex hive_lock; + atomic_t in_reset; + int hi_req_count; + struct amdgpu_device *hi_req_gpu; + struct task_barrier tb; + enum { + AMDGPU_XGMI_PSTATE_MIN, + AMDGPU_XGMI_PSTATE_MAX_VEGA20, + AMDGPU_XGMI_PSTATE_UNKNOWN + } pstate; +}; + +struct amdgpu_pcs_ras_field { + const char *err_name; + uint32_t pcs_err_mask; + uint32_t pcs_err_shift; }; -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); +extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs; +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); +void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); -void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); +int amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); -int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); -void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); - +int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, + struct 
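/*
 * The hive pstate handling in amdgpu_xgmi_set_pstate() reduces to
 * reference counting: a single peer requesting high is enough to hold
 * the hive high, and only when hi_req_count drops back to zero may the
 * hive return to AMDGPU_XGMI_PSTATE_MIN. Sketch of the invariant
 * (illustrative only, not driver code):
 *
 *	// hive must stay at the high pstate while any requester remains
 *	bool hive_must_stay_high = hive->hi_req_count > 0;
 */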
amdgpu_device *peer_adev); +uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, + uint64_t addr); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h new file mode 100644 index 000000000000..7326b6c1b71c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -0,0 +1,334 @@ +/* + * Copyright 2018-2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef AMDGV_SRIOV_MSG__H_ +#define AMDGV_SRIOV_MSG__H_ + +/* unit in kilobytes */ +#define AMD_SRIOV_MSG_VBIOS_OFFSET 0 +#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 +#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB +#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 + +/* + * layout + * 0 64KB 65KB 66KB + * | VBIOS | PF2VF | VF2PF | Bad Page | ... 
+ * | 64KB | 1KB | 1KB | + */ +#define AMD_SRIOV_MSG_SIZE_KB 1 +#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB +#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) + +/* + * PF2VF history log: + * v1 defined in amdgim + * v2 current + * + * VF2PF history log: + * v1 defined in amdgim + * v2 defined in amdgim + * v3 current + */ +#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2 +#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3 + +#define AMD_SRIOV_MSG_RESERVE_UCODE 24 + +#define AMD_SRIOV_MSG_RESERVE_VCN_INST 4 + +enum amd_sriov_ucode_engine_id { + AMD_SRIOV_UCODE_ID_VCE = 0, + AMD_SRIOV_UCODE_ID_UVD, + AMD_SRIOV_UCODE_ID_MC, + AMD_SRIOV_UCODE_ID_ME, + AMD_SRIOV_UCODE_ID_PFP, + AMD_SRIOV_UCODE_ID_CE, + AMD_SRIOV_UCODE_ID_RLC, + AMD_SRIOV_UCODE_ID_RLC_SRLC, + AMD_SRIOV_UCODE_ID_RLC_SRLG, + AMD_SRIOV_UCODE_ID_RLC_SRLS, + AMD_SRIOV_UCODE_ID_MEC, + AMD_SRIOV_UCODE_ID_MEC2, + AMD_SRIOV_UCODE_ID_SOS, + AMD_SRIOV_UCODE_ID_ASD, + AMD_SRIOV_UCODE_ID_TA_RAS, + AMD_SRIOV_UCODE_ID_TA_XGMI, + AMD_SRIOV_UCODE_ID_SMC, + AMD_SRIOV_UCODE_ID_SDMA, + AMD_SRIOV_UCODE_ID_SDMA2, + AMD_SRIOV_UCODE_ID_VCN, + AMD_SRIOV_UCODE_ID_DMCU, + AMD_SRIOV_UCODE_ID__MAX +}; + +#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed + +union amd_sriov_msg_feature_flags { + struct { + uint32_t error_log_collect : 1; + uint32_t host_load_ucodes : 1; + uint32_t host_flr_vramlost : 1; + uint32_t mm_bw_management : 1; + uint32_t pp_one_vf_mode : 1; + uint32_t reg_indirect_acc : 1; + uint32_t reserved : 26; + } flags; + uint32_t all; +}; + +union amd_sriov_reg_access_flags { + struct { + uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_mmhub : 1; + uint32_t vf_reg_access_gc : 1; + uint32_t reserved : 29; + } flags; + uint32_t all; +}; + +union amd_sriov_msg_os_info { + struct { + uint32_t windows : 1; + uint32_t reserved : 31; + } info; + uint32_t all; +}; + +struct amd_sriov_msg_uuid_info { + union { + struct { + uint32_t did : 16; + uint32_t fcn : 8; + uint32_t asic_7 : 8; + }; + uint32_t time_low; + }; + + struct { + uint32_t time_mid : 16; + uint32_t time_high : 12; + uint32_t version : 4; + }; + + struct { + struct { + uint8_t clk_seq_hi : 6; + uint8_t variant : 2; + }; + union { + uint8_t clk_seq_low; + uint8_t asic_6; + }; + uint16_t asic_4; + }; + + uint32_t asic_0; +}; + +struct amd_sriov_msg_pf2vf_info_header { + /* the total structure size in byte */ + uint32_t size; + /* version of this structure, written by the HOST */ + uint32_t version; + /* reserved */ + uint32_t reserved[2]; +}; + +struct amd_sriov_msg_pf2vf_info { + /* header contains size and version */ + struct amd_sriov_msg_pf2vf_info_header header; + /* use private key from mailbox 2 to create checksum */ + uint32_t checksum; + /* The features flags of the HOST driver supports */ + union amd_sriov_msg_feature_flags feature_flags; + /* (max_width * max_height * fps) / (16 * 16) */ + uint32_t hevc_enc_max_mb_per_second; + /* (max_width * max_height) / (16 * 16) */ + uint32_t hevc_enc_max_mb_per_frame; + /* (max_width * max_height * fps) / (16 * 16) */ + uint32_t avc_enc_max_mb_per_second; + /* (max_width * max_height) / (16 * 16) */ + uint32_t avc_enc_max_mb_per_frame; + /* MEC FW position in BYTE from the start of VF visible frame buffer */ + uint64_t mecfw_offset; + /* MEC FW size in BYTE */ + uint32_t mecfw_size; + /* UVD FW position in BYTE from the start of VF visible frame buffer */ + 
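/*
 * Worked example for the *_max_mb_per_* fields above: a 4096x2160
 * stream at 60 fps works out to (4096 * 2160) / (16 * 16) = 34560
 * macroblocks per frame and 34560 * 60 = 2073600 macroblocks per
 * second (illustrative numbers, not a host-enforced policy).
 */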
uint64_t uvdfw_offset; + /* UVD FW size in BYTE */ + uint32_t uvdfw_size; + /* VCE FW position in BYTE from the start of VF visible frame buffer */ + uint64_t vcefw_offset; + /* VCE FW size in BYTE */ + uint32_t vcefw_size; + /* Bad pages block position in BYTE */ + uint32_t bp_block_offset_low; + uint32_t bp_block_offset_high; + /* Bad pages block size in BYTE */ + uint32_t bp_block_size; + /* frequency for VF to update the VF2PF area in msec, 0 = manual */ + uint32_t vf2pf_update_interval_ms; + /* identification in ROCm SMI */ + uint64_t uuid; + uint32_t fcn_idx; + /* flags to indicate which register access method VF should use */ + union amd_sriov_reg_access_flags reg_access_flags; + /* MM BW management */ + struct { + uint32_t decode_max_dimension_pixels; + uint32_t decode_max_frame_pixels; + uint32_t encode_max_dimension_pixels; + uint32_t encode_max_frame_pixels; + } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST]; + /* UUID info */ + struct amd_sriov_msg_uuid_info uuid_info; + /* pcie atomic Ops info */ + uint32_t pcie_atomic_ops_enabled_flags; + /* reserved */ + uint32_t reserved[256 - 48]; +}; + +struct amd_sriov_msg_vf2pf_info_header { + /* the total structure size in byte */ + uint32_t size; + /* version of this structure, written by the guest */ + uint32_t version; + /* reserved */ + uint32_t reserved[2]; +}; + +struct amd_sriov_msg_vf2pf_info { + /* header contains size and version */ + struct amd_sriov_msg_vf2pf_info_header header; + uint32_t checksum; + /* driver version */ + uint8_t driver_version[64]; + /* driver certification, 1=WHQL, 0=None */ + uint32_t driver_cert; + /* guest OS type and version */ + union amd_sriov_msg_os_info os_info; + /* guest fb information in the unit of MB */ + uint32_t fb_usage; + /* guest gfx engine usage percentage */ + uint32_t gfx_usage; + /* guest gfx engine health percentage */ + uint32_t gfx_health; + /* guest compute engine usage percentage */ + uint32_t compute_usage; + /* guest compute engine health percentage */ + uint32_t compute_health; + /* guest avc engine usage percentage. 0xffff means N/A */ + uint32_t avc_enc_usage; + /* guest avc engine health percentage. 0xffff means N/A */ + uint32_t avc_enc_health; + /* guest hevc engine usage percentage. 0xffff means N/A */ + uint32_t hevc_enc_usage; + /* guest hevc engine usage percentage. 
0xffff means N/A */ + uint32_t hevc_enc_health; + /* combined encode/decode usage */ + uint32_t encode_usage; + uint32_t decode_usage; + /* Version of PF2VF that VF understands */ + uint32_t pf2vf_version_required; + /* additional FB usage */ + uint32_t fb_vis_usage; + uint32_t fb_vis_size; + uint32_t fb_size; + /* guest ucode data, each one is 1.25 Dword */ + struct { + uint8_t id; + uint32_t version; + } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE]; + uint64_t dummy_page_addr; + + /* reserved */ + uint32_t reserved[256-70]; +}; + +/* mailbox message send from guest to host */ +enum amd_sriov_mailbox_request_message { + MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1, + MB_REQ_MSG_REL_GPU_INIT_ACCESS, + MB_REQ_MSG_REQ_GPU_FINI_ACCESS, + MB_REQ_MSG_REL_GPU_FINI_ACCESS, + MB_REQ_MSG_REQ_GPU_RESET_ACCESS, + MB_REQ_MSG_REQ_GPU_INIT_DATA, + + MB_REQ_MSG_LOG_VF_ERROR = 200, +}; + +/* mailbox message send from host to guest */ +enum amd_sriov_mailbox_response_message { + MB_RES_MSG_CLR_MSG_BUF = 0, + MB_RES_MSG_READY_TO_ACCESS_GPU = 1, + MB_RES_MSG_FLR_NOTIFICATION, + MB_RES_MSG_FLR_NOTIFICATION_COMPLETION, + MB_RES_MSG_SUCCESS, + MB_RES_MSG_FAIL, + MB_RES_MSG_QUERY_ALIVE, + MB_RES_MSG_GPU_INIT_DATA_READY, + + MB_RES_MSG_TEXT_MESSAGE = 255 +}; + +/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */ +enum amd_sriov_gpu_init_data_version { + GPU_INIT_DATA_READY_V1 = 1, +}; + +#pragma pack(pop) // Restore previous packing option + +/* checksum function between host and guest */ +unsigned int amd_sriov_msg_checksum(void *obj, + unsigned long obj_size, + unsigned int key, + unsigned int checksum); + +/* assertion at compile time */ +#ifdef __linux__ +#define stringification(s) _stringification(s) +#define _stringification(s) #s + +_Static_assert( + sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10, + "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB"); + +_Static_assert( + sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10, + "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB"); + +_Static_assert( + AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0, + "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4"); + +_Static_assert( + AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX, + "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX"); + +#undef _stringification +#undef stringification +#endif + +#endif /* AMDGV_SRIOV_MSG__H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index d9cc746af5e6..3ea557864320 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -73,10 +73,11 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: athub_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
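/*
 * Call-pattern sketch for amd_sriov_msg_checksum() declared in the
 * SR-IOV header above -- the algorithm itself is not shown there, so
 * the zero-then-sum convention below is an assumption:
 *
 *	info.checksum = 0;
 *	info.checksum = amd_sriov_msg_checksum(&info, sizeof(info),
 *					       key, 0);
 */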
true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index ceb9aa4df0e7..ab6a07e5e8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -27,7 +27,6 @@ #include "athub/athub_2_0_0_offset.h" #include "athub/athub_2_0_0_sh_mask.h" #include "athub/athub_2_0_0_default.h" -#include "navi10_enum.h" #include "soc15_common.h" @@ -37,9 +36,12 @@ athub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, { uint32_t def, data; + if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) + return; + def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) + if (enable) data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK; else data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK; @@ -54,10 +56,13 @@ athub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev, { uint32_t def, data; + if (!((adev->cg_flags & AMD_CG_SUPPORT_MC_LS) && + (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))) + return; + def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) && - (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + if (enable) data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; else data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; @@ -72,14 +77,13 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): athub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c new file mode 100644 index 000000000000..2edefd10e56c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -0,0 +1,99 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "athub_v2_1.h" + +#include "athub/athub_2_1_0_offset.h" +#include "athub/athub_2_1_0_sh_mask.h" + +#include "soc15_common.h" + +static void +athub_v2_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) + data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK; + else + data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK; + + if (def != data) + WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data); +} + +static void +athub_v2_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) && + (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; + else + data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK; + + if(def != data) + WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data); +} + +int athub_v2_1_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); + athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); + break; + default: + break; + } + + return 0; +} + +void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u32 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_ATHUB_MGCG */ + data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL); + if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_ATHUB_MGCG; + + /* AMD_CG_SUPPORT_ATHUB_LS */ + if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_ATHUB_LS; +} diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h new file mode 100644 index 000000000000..5e6824c0f591 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __ATHUB_V2_1_H__ +#define __ATHUB_V2_1_H__ + +int athub_v2_1_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state); +void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u32 *flags); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8c..6fa2229b7229 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -31,6 +31,7 @@ #define ATOM_DEBUG +#include "atomfirmware.h" #include "atom.h" #include "atom-names.h" #include "atom-bits.h" @@ -54,6 +55,8 @@ #define PLL_INDEX 2 #define PLL_DATA 3 +#define ATOM_CMD_TIMEOUT_SEC 20 + typedef struct { struct atom_context *ctx; uint32_t *ps, *ws; @@ -64,13 +67,13 @@ typedef struct { bool abort; } atom_exec_context; -int amdgpu_atom_debug = 0; -static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); -int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); +int amdgpu_atom_debug; +static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params); +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params); static uint32_t atom_arg_mask[8] = - { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, -0xFF000000 }; + { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, + 0xFF000000 }; static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; static int atom_dst_to_src[8][4] = { @@ -86,7 +89,7 @@ static int atom_dst_to_src[8][4] = { }; static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; -static int debug_depth = 0; +static int debug_depth; #ifdef ATOM_DEBUG static void debug_print_spaces(int n) { @@ -112,11 +115,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base++; break; case ATOM_IIO_READ: - temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); + temp = ctx->card->reg_read(ctx->card, CU16(base + 1)); base += 3; break; case ATOM_IIO_WRITE: - ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); + ctx->card->reg_write(ctx->card, CU16(base + 1), temp); base += 3; break; case ATOM_IIO_CLEAR: @@ -744,8 +747,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) { + DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n", + ATOM_CMD_TIMEOUT_SEC); ctx->abort = true; } } else { @@ -1198,7 +1202,7 @@ static struct { atom_op_div32, ATOM_ARG_WS}, }; -static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) +static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params) { int base = CU16(ctx->cmd_table + 4 + 2 * index); int len, ws, ps, ptr; @@ -1259,7 +1263,7 @@ free: return ret; } -int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) +int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params) { int r; @@ -1296,12 +1300,168 @@ static void atom_index_iio(struct atom_context *ctx, int base) } } +static void atom_get_vbios_name(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned char str_num; + unsigned short off_to_vbios_str; + unsigned char *c_ptr; + int name_size; + int 
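/*
 * The jump-loop guard above aborts a command table once
 * jiffies_to_msecs(elapsed) exceeds ATOM_CMD_TIMEOUT_SEC * 1000; with
 * the 20 s default that is 20000 ms. Equivalent check, written out as
 * a sketch:
 *
 *	if (jiffies_to_msecs(jiffies - ctx->last_jump_jiffies) >
 *	    ATOM_CMD_TIMEOUT_SEC * MSEC_PER_SEC)
 *		ctx->abort = true;
 */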
i; + + const char *na = "--N/A--"; + char *back; + + p_rom = ctx->bios; + + str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS); + if (str_num != 0) { + off_to_vbios_str = + *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); + + c_ptr = (unsigned char *)(p_rom + off_to_vbios_str); + } else { + /* do not know where to find name */ + memcpy(ctx->name, na, 7); + ctx->name[7] = 0; + return; + } + + /* + * skip the atombios strings, usually 4 + * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type + */ + for (i = 0; i < str_num; i++) { + while (*c_ptr != 0) + c_ptr++; + c_ptr++; + } + + /* skip the following 2 chars: 0x0D 0x0A */ + c_ptr += 2; + + name_size = strnlen(c_ptr, STRLEN_LONG - 1); + memcpy(ctx->name, c_ptr, name_size); + back = ctx->name + name_size; + while ((*--back) == ' ') + ; + *(back + 1) = '\0'; +} + +static void atom_get_vbios_date(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned char *date_in_rom; + + p_rom = ctx->bios; + + date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE; + + ctx->date[0] = '2'; + ctx->date[1] = '0'; + ctx->date[2] = date_in_rom[6]; + ctx->date[3] = date_in_rom[7]; + ctx->date[4] = '/'; + ctx->date[5] = date_in_rom[0]; + ctx->date[6] = date_in_rom[1]; + ctx->date[7] = '/'; + ctx->date[8] = date_in_rom[3]; + ctx->date[9] = date_in_rom[4]; + ctx->date[10] = ' '; + ctx->date[11] = date_in_rom[9]; + ctx->date[12] = date_in_rom[10]; + ctx->date[13] = date_in_rom[11]; + ctx->date[14] = date_in_rom[12]; + ctx->date[15] = date_in_rom[13]; + ctx->date[16] = '\0'; +} + +static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start, + int end, int maxlen) +{ + unsigned long str_off; + unsigned char *p_rom; + unsigned short str_len; + + str_off = 0; + str_len = strnlen(str, maxlen); + p_rom = ctx->bios; + + for (; start <= end; ++start) { + for (str_off = 0; str_off < str_len; ++str_off) { + if (str[str_off] != *(p_rom + start + str_off)) + break; + } + + if (str_off == str_len || str[str_off] == 0) + return p_rom + start; + } + return NULL; +} + +static void atom_get_vbios_pn(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned short off_to_vbios_str; + unsigned char *vbios_str; + int count; + + off_to_vbios_str = 0; + p_rom = ctx->bios; + + if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) { + off_to_vbios_str = + *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); + + vbios_str = (unsigned char *)(p_rom + off_to_vbios_str); + } else { + vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER; + } + + if (*vbios_str == 0) { + vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64); + if (vbios_str == NULL) + vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1; + } + if (vbios_str != NULL && *vbios_str == 0) + vbios_str++; + + if (vbios_str != NULL) { + count = 0; + while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' && + vbios_str[count] <= 'z') { + ctx->vbios_pn[count] = vbios_str[count]; + count++; + } + + ctx->vbios_pn[count] = 0; + } +} + +static void atom_get_vbios_version(struct atom_context *ctx) +{ + unsigned char *vbios_ver; + + /* find anchor ATOMBIOSBK-AMD */ + vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64); + if (vbios_ver != NULL) { + /* skip ATOMBIOSBK-AMD VER */ + vbios_ver += 18; + memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL); + } else { + ctx->vbios_ver_str[0] = '\0'; + } +} + struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) { int base; struct atom_context *ctx = 
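/*
 * atom_get_vbios_date() above rearranges the ROM's "MM/DD/YY hh:mm"
 * bytes into "20YY/MM/DD hh:mm"; for example, a ROM date of
 * "10/27/20 14:30" becomes ctx->date = "2020/10/27 14:30"
 * (illustrative values).
 */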
kzalloc(sizeof(struct atom_context), GFP_KERNEL); char *str; + struct _ATOM_ROM_HEADER *atom_rom_header; + struct _ATOM_MASTER_DATA_TABLE *master_table; + struct _ATOM_FIRMWARE_INFO *atom_fw_info; u16 idx; if (!ctx) @@ -1350,6 +1510,21 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); } + atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); + if (atom_rom_header->usMasterDataTableOffset != 0) { + master_table = (struct _ATOM_MASTER_DATA_TABLE *) + CSTR(atom_rom_header->usMasterDataTableOffset); + if (master_table->ListOfDataTables.FirmwareInfo != 0) { + atom_fw_info = (struct _ATOM_FIRMWARE_INFO *) + CSTR(master_table->ListOfDataTables.FirmwareInfo); + ctx->version = atom_fw_info->ulFirmwareRevision; + } + } + + atom_get_vbios_name(ctx); + atom_get_vbios_pn(ctx); + atom_get_vbios_date(ctx); + atom_get_vbios_version(ctx); return ctx; } @@ -1385,8 +1560,8 @@ void amdgpu_atom_destroy(struct atom_context *ctx) } bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, - uint16_t * size, uint8_t * frev, uint8_t * crev, - uint16_t * data_start) + uint16_t *size, uint8_t *frev, uint8_t *crev, + uint16_t *data_start) { int offset = index * 2 + 4; int idx = CU16(ctx->data_table + offset); @@ -1405,8 +1580,8 @@ bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, return true; } -bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, - uint8_t * crev) +bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, + uint8_t *crev) { int offset = index * 2 + 4; int idx = CU16(ctx->cmd_table + offset); diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index 4205bbe5d8d7..0c1839824520 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -112,12 +112,14 @@ struct drm_device; #define ATOM_IO_SYSIO 2 #define ATOM_IO_IIO 0x80 +#define STRLEN_NORMAL 32 +#define STRLEN_LONG 64 +#define STRLEN_VERYLONG 254 + struct card_info { struct drm_device *dev; void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */ - void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ - uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */ void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */ void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ @@ -142,6 +144,12 @@ struct atom_context { uint32_t *scratch; int scratch_size_bytes; char vbios_version[20]; + + uint8_t name[STRLEN_LONG]; + uint8_t vbios_pn[STRLEN_LONG]; + uint32_t version; + uint8_t vbios_ver_str[STRLEN_NORMAL]; + uint8_t date[STRLEN_NORMAL]; }; extern int amdgpu_atom_debug; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 213e62a28ba0..afad094f84c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -41,7 +41,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 
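/*
 * The dev->dev_private -> drm_to_adev(dev) conversions throughout this
 * file assume struct amdgpu_device now embeds its struct drm_device, so
 * the helper is essentially a container_of() (sketch, assuming the
 * embedded member is named ddev):
 *
 *	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
 *	{
 *		return container_of(ddev, struct amdgpu_device, ddev);
 *	}
 */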
SET_CRTC_OVERSCAN_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); @@ -84,7 +84,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); ENABLE_SCALER_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); @@ -114,7 +114,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); ENABLE_CRTC_PS_ALLOCATION args; @@ -131,7 +131,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); ENABLE_CRTC_PS_ALLOCATION args; @@ -147,7 +147,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); BLANK_CRTC_PS_ALLOCATION args; @@ -163,7 +163,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; @@ -192,7 +192,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); SET_CRTC_USING_DTD_TIMING_PARAMETERS args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); u16 misc = 0; @@ -307,7 +307,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_encoder *encoder = amdgpu_crtc->encoder; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -588,7 +588,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, struct amdgpu_atom_ss *ss) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u8 frev, crev; int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); union set_pixel_clock args; @@ -749,7 +749,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); 
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); @@ -818,7 +818,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); u32 pll_clock = mode->clock; @@ -851,7 +851,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode pll->reference_div = amdgpu_crtc->pll_reference_div; pll->post_div = amdgpu_crtc->pll_post_div; - amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock, + amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 6858cde9fc5d..f327becb022f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -60,7 +60,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, u8 delay, u8 *ack) { struct drm_device *dev = chan->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union aux_channel_transaction args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; @@ -186,16 +186,12 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector) { - int ret; - amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd; - amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev; amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer; - ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux); - if (!ret) - amdgpu_connector->ddc_bus->has_aux = true; + amdgpu_connector->ddc_bus->aux.drm_dev = amdgpu_connector->base.dev; - WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret); + drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux); + amdgpu_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ @@ -311,7 +307,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector) { struct drm_device *dev = amdgpu_connector->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, amdgpu_connector->ddc_bus->rec.i2c_id, 0); @@ -334,6 +330,22 @@ static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connect buf[0], buf[1], buf[2]); } +static void amdgpu_atombios_dp_ds_ports(struct amdgpu_connector *amdgpu_connector) +{ + struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; + int ret; + + if (dig_connector->dpcd[DP_DPCD_REV] > 0x10) { + ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, + DP_DOWNSTREAM_PORT_0, + dig_connector->downstream_ports, + DP_MAX_DOWNSTREAM_PORTS); + if (ret) + memset(dig_connector->downstream_ports, 0, + DP_MAX_DOWNSTREAM_PORTS); + } +} + int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector 
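/*
 * drm_dp_aux_init() above only initializes the aux channel; with the
 * drm_dp_aux_register() call dropped here, registration is expected to
 * happen later, once the connector itself is registered and a struct
 * device exists to parent the aux node. Assumed pairing (sketch):
 *
 *	drm_dp_aux_init(&aux);			// early, no kdev yet
 *	...
 *	aux.dev = connector->kdev;		// at connector register time
 *	drm_dp_aux_register(&aux);
 */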
*amdgpu_connector) { struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; @@ -349,7 +361,7 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) dig_connector->dpcd); amdgpu_atombios_dp_probe_oui(amdgpu_connector); - + amdgpu_atombios_dp_ds_ports(amdgpu_connector); return 0; } @@ -361,7 +373,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - struct amdgpu_connector_atom_dig *dig_connector; int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; @@ -369,8 +380,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, if (!amdgpu_connector->con_priv) return panel_mode; - dig_connector = amdgpu_connector->con_priv; - if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, @@ -603,7 +612,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i dp_info->tries = 0; voltage = 0xff; while (1) { - drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); + drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, dp_info->link_status) <= 0) { @@ -668,7 +677,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i dp_info->tries = 0; channel_eq = false; while (1) { - drm_dp_link_train_channel_eq_delay(dp_info->dpcd); + drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, dp_info->link_status) <= 0) { @@ -711,9 +720,8 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *dig_connector; struct amdgpu_atombios_dp_link_train_info dp_info; @@ -721,7 +729,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, if (!amdgpu_encoder->enc_priv) return; - dig = amdgpu_encoder->enc_priv; amdgpu_connector = to_amdgpu_connector(connector); if (!amdgpu_connector->con_priv) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1e94a9b652f7..6134ed964027 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -70,7 +70,7 @@ u8 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return 0; @@ -84,7 +84,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode { struct drm_encoder *encoder = &amdgpu_encoder->base; struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder_atom_dig *dig; if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) @@ -152,7 +152,7 @@ 
amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd) struct amdgpu_backlight_privdata *pdata = bl_get_data(bd); struct amdgpu_encoder *amdgpu_encoder = pdata->encoder; struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); } @@ -166,12 +166,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode struct drm_connector *drm_connector) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct backlight_device *bd; struct backlight_properties props; struct amdgpu_backlight_privdata *pdata; struct amdgpu_encoder_atom_dig *dig; - u8 backlight_level; char bl_name[16]; /* Mac laptops with multiple GPUs use the gmux driver for backlight @@ -207,8 +206,6 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode pdata->encoder = amdgpu_encoder; - backlight_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); - dig = amdgpu_encoder->enc_priv; dig->bl_dev = bd; @@ -229,7 +226,7 @@ void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct backlight_device *bd = NULL; struct amdgpu_encoder_atom_dig *dig; @@ -319,7 +316,7 @@ static void amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; @@ -382,7 +379,7 @@ static void amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); union dvo_encoder_control args; int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); @@ -499,10 +496,8 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) } else { return ATOM_ENCODER_MODE_DVI; } - break; case DRM_MODE_CONNECTOR_LVDS: return ATOM_ENCODER_MODE_LVDS; - break; case DRM_MODE_CONNECTOR_DisplayPort: dig_connector = amdgpu_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || @@ -519,20 +514,16 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) } else { return ATOM_ENCODER_MODE_DVI; } - break; case DRM_MODE_CONNECTOR_eDP: return ATOM_ENCODER_MODE_DP; case DRM_MODE_CONNECTOR_DVIA: case DRM_MODE_CONNECTOR_VGA: return ATOM_ENCODER_MODE_CRT; - break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_9PinDIN: /* fix me */ return ATOM_ENCODER_MODE_TV; - /*return ATOM_ENCODER_MODE_CV;*/ - break; } } @@ -573,7 +564,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, int action, int panel_mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = 
amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -762,7 +753,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1178,7 +1169,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector, { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_device *dev = amdgpu_connector->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union dig_transmitter_control args; int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); uint8_t frev, crev; @@ -1225,7 +1216,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder); union external_encoder_control args; @@ -1466,7 +1457,7 @@ void amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); union crtc_source_param args; @@ -1673,7 +1664,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) void amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -1701,7 +1692,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); @@ -1751,7 +1742,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); uint32_t bios_0_scratch; @@ -1790,7 +1781,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); @@ -1848,7 +1839,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct 
drm_connector *connector, bool connected) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -1999,7 +1990,7 @@ struct amdgpu_encoder_atom_dig * amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_mode_info *mode_info = &adev->mode_info; int index = GetIndexIntoMasterTable(DATA, LVDS_Info); uint16_t data_offset, misc; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index 980c363b1a0a..af0335535f82 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, u8 *buf, u8 num) { struct drm_device *dev = chan->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; @@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, } args.lpI2CDataOut = cpu_to_le16(out); } else { - if (num > ATOM_MAX_HW_I2C_READ) { - DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); - r = -EINVAL; - goto done; - } args.ucRegIndex = 0; args.lpI2CDataOut = 0; } @@ -164,7 +159,7 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data) +void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data) { PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 1befdee9f0f1..54f28c075f21 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -26,6 +26,8 @@ #include <linux/module.h> #include <linux/pci.h> +#include <drm/amdgpu_drm.h> + #include "amdgpu.h" #include "amdgpu_atombios.h" #include "amdgpu_ih.h" @@ -68,7 +70,81 @@ #include "amdgpu_dm.h" #include "amdgpu_amdkfd.h" -#include "dce_virtual.h" +#include "amdgpu_vkms.h" + +static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 2048, + .max_height = 1152, + .max_pixels_per_frame = 2048 * 1152, + .max_level = 0, + }, +}; + +static const struct amdgpu_video_codecs cik_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(cik_video_codecs_encode_array), + .codec_array = cik_video_codecs_encode_array, +}; + +static const struct amdgpu_video_codec_info cik_video_codecs_decode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, + .max_width = 2048, + .max_height = 1152, + .max_pixels_per_frame = 2048 * 1152, + .max_level = 3, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, + .max_width = 2048, + .max_height = 1152, + .max_pixels_per_frame = 2048 * 1152, + .max_level = 5, + }, + { + .codec_type = 
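/*
 * Usage sketch for the video codec capability tables introduced here:
 * callers query per direction and walk the returned array (illustrative
 * only; 2048 * 1152 = 2359296 pixels per frame for every CIK entry):
 *
 *	const struct amdgpu_video_codecs *codecs;
 *
 *	if (!cik_query_video_codecs(adev, false, &codecs))	// decode caps
 *		for (i = 0; i < codecs->codec_count; i++)
 *			max_px = codecs->codec_array[i].max_pixels_per_frame;
 */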
AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 2048, + .max_height = 1152, + .max_pixels_per_frame = 2048 * 1152, + .max_level = 41, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, + .max_width = 2048, + .max_height = 1152, + .max_pixels_per_frame = 2048 * 1152, + .max_level = 4, + }, +}; + +static const struct amdgpu_video_codecs cik_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(cik_video_codecs_decode_array), + .codec_array = cik_video_codecs_decode_array, +}; + +static int cik_query_video_codecs(struct amdgpu_device *adev, bool encode, + const struct amdgpu_video_codecs **codecs) +{ + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + if (encode) + *codecs = &cik_video_codecs_encode; + else + *codecs = &cik_video_codecs_decode; + return 0; + default: + return -EINVAL; + } +} /* * Indirect registers accessor @@ -1251,13 +1327,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) +/** + * cik_asic_pci_config_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Use PCI Config method to reset the GPU. + * + * Returns 0 for success. + */ +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); @@ -1285,29 +1370,20 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + return r; } -/** - * cik_asic_pci_config_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * - * Use PCI Config method to reset the GPU. - * - * Returns 0 for success. 
- */ -static int cik_asic_pci_config_reset(struct amdgpu_device *adev) +static bool cik_asic_supports_baco(struct amdgpu_device *adev) { - int r; - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = cik_gpu_pci_config_reset(adev); - - amdgpu_atombios_scratch_regs_engine_hung(adev, false); - - return r; + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + return amdgpu_dpm_is_baco_supported(adev); + default: + return false; + } } static enum amd_reset_method @@ -1315,12 +1391,18 @@ cik_asic_reset_method(struct amdgpu_device *adev) { bool baco_reset; + if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY || + amdgpu_reset_method == AMD_RESET_METHOD_BACO) + return amdgpu_reset_method; + + if (amdgpu_reset_method != -1) + dev_warn(adev->dev, "Specified reset:%d isn't supported, using AUTO instead.\n", + amdgpu_reset_method); + switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - /* disable baco reset until it works */ - /* smu7_asic_get_baco_capability(adev, &baco_reset); */ - baco_reset = false; + baco_reset = cik_asic_supports_baco(adev); break; default: baco_reset = false; @@ -1347,10 +1429,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) int r; if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); - r = smu7_asic_baco_reset(adev); + dev_info(adev->dev, "BACO reset\n"); + r = amdgpu_dpm_baco_reset(adev); } else { + dev_info(adev->dev, "PCI CONFIG reset\n"); r = cik_asic_pci_config_reset(adev); } @@ -1800,12 +1882,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } -static void cik_detect_hw_virtualization(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) { @@ -1908,6 +1984,10 @@ static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev) return (nak_r + nak_g); } +static void cik_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, @@ -1927,6 +2007,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_pcie_usage = &cik_get_pcie_usage, .need_reset_on_init = &cik_need_reset_on_init, .get_pcie_replay_count = &cik_get_pcie_replay_count, + .supports_baco = &cik_asic_supports_baco, + .pre_asic_init = &cik_pre_asic_init, + .query_video_codecs = &cik_query_video_codecs, }; static int cik_common_early_init(void *handle) @@ -2167,8 +2250,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block = int cik_set_ip_blocks(struct amdgpu_device *adev) { - cik_detect_hw_virtualization(adev); - switch (adev->asic_type) { case CHIP_BONAIRE: amdgpu_device_ip_block_add(adev, &cik_common_ip_block); @@ -2178,7 +2259,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -2196,7 +2277,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, 
&pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -2214,7 +2295,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -2234,7 +2315,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 9870bf27870e..f91ab4c246b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -31,7 +31,5 @@ void cik_srbm_select(struct amdgpu_device *adev, int cik_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); -int smu7_asic_baco_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 401c99f0b2d0..df385ffc9768 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -177,6 +177,7 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev) * cik_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (CIK). Also check for @@ -266,6 +267,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, * cik_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr * * Set the IH ring buffer rptr. 
*/ @@ -307,8 +309,7 @@ static int cik_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; @@ -316,14 +317,9 @@ static int cik_ih_sw_fini(void *handle) static int cik_ih_hw_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = cik_ih_irq_init(adev); - if (r) - return r; - - return 0; + return cik_ih_irq_init(adev); } static int cik_ih_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c45304f1047c..c8ebd108548d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -195,7 +195,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], - (lower_32_bits(ring->wptr) << 2) & 0x3fffc); + (lower_32_bits(ring->wptr) << 2) & 0x3fffc); } static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) @@ -215,7 +215,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (CIK). */ @@ -228,7 +230,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, u32 extra_bits = vmid & 0xf; /* IB packet must end on a 8 DW boundary */ - cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); + cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ @@ -267,7 +269,9 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring) * cik_sdma_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -320,8 +324,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); } - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** * cik_sdma_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (CIK). * Returns 0 on success, error on failure. 
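For reference, the reworked nop count in cik_sdma_ring_emit_ib is equivalent to the expression it replaces: 12 and 4 agree modulo 8, so (12 - (wptr & 7)) % 8 and (4 - wptr) & 7 yield the same value for every 32-bit wptr, and both pad the ring until wptr % 8 == 4 so that the 4-dword INDIRECT_BUFFER packet that follows ends on an 8-DW boundary. The same trick reappears below in cik_sdma_ring_pad_ib, where pad_count becomes (-ib->length_dw) & 7. A minimal standalone check of the arithmetic (plain C, outside the driver; the assertions are mine, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t w;

	for (w = 0; w < 256; w++) {
		/* old expression from cik_sdma_ring_emit_ib */
		uint32_t old_count = (12 - (w & 7)) % 8;
		/* new expression; unsigned wraparound makes the & 7 valid */
		uint32_t new_count = (4 - w) & 7;

		assert(old_count == new_count);
		/* padding leaves the next packet starting at wptr % 8 == 4 */
		assert(((w + new_count) & 7) == 4);
	}
	printf("nop-count expressions agree\n");
	return 0;
}

The masked form simply drops the modulo and the intermediate & 7 on wptr, relying on unsigned wraparound instead; it compiles down to a subtract and an AND.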
@@ -679,7 +682,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -716,7 +720,7 @@ err0: } /** - * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART + * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -742,7 +746,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, } /** - * cik_sdma_vm_write_pages - update PTEs by writing them manually + * cik_sdma_vm_write_pte - update PTEs by writing them manually * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -771,7 +775,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, } /** - * cik_sdma_vm_set_pages - update the page tables using sDMA + * cik_sdma_vm_set_pte_pde - update the page tables using sDMA * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -800,8 +804,9 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, } /** - * cik_sdma_vm_pad_ib - pad the IB to the required number of dw + * cik_sdma_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding * */ @@ -811,7 +816,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -850,7 +855,8 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (CIK). @@ -978,9 +984,9 @@ static int cik_sdma_sw_init(void *handle) sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1071,22 +1077,19 @@ static int cik_sdma_soft_reset(void *handle) { u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 tmp = RREG32(mmSRBM_STATUS2); + u32 tmp; - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp |= SDMA0_F32_CNTL__HALT_MASK; - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; - } + /* sdma0 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; + + /* sdma1 */ + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); + tmp |= SDMA0_F32_CNTL__HALT_MASK; + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); @@ -1301,10 +1304,11 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) /** * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: is this a secure operation * * Copy GPU buffers using the DMA engine (CIK). 
* Used by the amdgpu ttm implementation to move pages if @@ -1313,7 +1317,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); ib->ptr[ib->length_dw++] = byte_count; @@ -1327,7 +1332,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib, /** * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to fill * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -1372,16 +1377,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version cik_sdma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index cee6e8a3ad9c..55982c0064b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -54,8 +54,6 @@ #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 -#define AMDGPU_NUM_OF_VMIDS 8 - #define PIPEID(x) ((x) << 0) #define MEID(x) ((x) << 2) #define VMID(x) ((x) << 4) @@ -450,7 +448,7 @@ # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) -#define PACKET3_AQUIRE_MEM 0x58 +#define PACKET3_ACQUIRE_MEM 0x58 #define PACKET3_REWIND 0x59 #define PACKET3_LOAD_UCONFIG_REG 0x5E #define PACKET3_LOAD_SH_REG 0x5F diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 1dca0cabc326..b8c47e0cf37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -179,6 +179,7 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev) * cz_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (VI). Also check for @@ -193,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. 
- */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happens, start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catch up. + */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } @@ -213,6 +225,8 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev, * cz_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to decode + * @entry: IV entry to place decoded information into * * Decodes the interrupt vector at the current rptr * position and also advance the position. @@ -245,6 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, * cz_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr * * Set the IH ring buffer rptr. */ @@ -286,8 +301,7 @@ static int cz_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 40d2ac723dd6..d1570a462a51 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -228,6 +228,7 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @crtc_id: crtc to cleanup pageflip on * @crtc_base: new address of the crtc (GPU MC address) + * @async: asynchronous flip * * Triggers the actual pageflip by updating the primary * surface base address. 
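The reworked cz_ih_get_wptr above no longer trusts the RB_OVERFLOW bit from the writeback copy of the write pointer on its own: the writeback value can carry a stale overflow bit from an overflow that was already handled, and acting on it would force rptr past vectors that were never overwritten. The register copy is therefore re-read to confirm the overflow before rptr is adjusted. A condensed sketch of the pattern (plain C, outside the driver; the stub accessors and the bit position are illustrative stand-ins for RREG32/REG_GET_FIELD on mmIH_RB_WPTR):

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW (1u << 0)	/* illustrative bit position */

static uint32_t wb_wptr = 16 | RB_OVERFLOW;	/* stale writeback copy */
static uint32_t reg_wptr = 16;			/* overflow already cleared */

static uint32_t read_wptr_register(void)
{
	return reg_wptr;	/* stands in for RREG32(mmIH_RB_WPTR) */
}

static uint32_t get_wptr(uint32_t *rptr, uint32_t ptr_mask)
{
	uint32_t wptr = wb_wptr;	/* cheap writeback read first */

	if (!(wptr & RB_OVERFLOW))
		goto out;

	/* Double check that the overflow wasn't already cleared. */
	wptr = read_wptr_register();
	if (!(wptr & RB_OVERFLOW))
		goto out;

	wptr &= ~RB_OVERFLOW;
	/* genuine overflow: resume at the oldest surviving vector */
	*rptr = (wptr + 16) & ptr_mask;
out:
	return wptr & ptr_mask;
}

int main(void)
{
	uint32_t rptr = 0, wptr;

	/* stale bit: rptr is left alone, no queued vectors are skipped */
	wptr = get_wptr(&rptr, 0xffff);
	printf("stale:   wptr=%u rptr=%u\n", wptr, rptr);

	/* genuine overflow: the register copy agrees, rptr jumps ahead */
	reg_wptr = 16 | RB_OVERFLOW;
	wptr = get_wptr(&rptr, 0xffff);
	printf("genuine: wptr=%u rptr=%u\n", wptr, rptr);
	return 0;
}

Without the second read, a leftover overflow bit would needlessly discard everything queued between the old rptr and wptr + 16.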
@@ -328,7 +329,7 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -383,7 +384,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -504,7 +505,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev) static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1209,7 +1210,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1226,7 +1227,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1272,7 +1273,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1328,7 +1329,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1483,7 +1484,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev) static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1519,7 +1520,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t 
size) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint8_t *frame = buffer + 3; @@ -1538,7 +1539,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); @@ -1569,7 +1570,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1749,7 +1750,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1822,7 +1823,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 vga_control; vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; @@ -1836,7 +1837,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); @@ -1850,7 +1851,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1861,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1980,8 +1980,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } @@ -2095,7 +2095,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + 
struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); u32 tmp; @@ -2111,7 +2111,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u16 *r, *g, *b; int i; u32 tmp; @@ -2202,22 +2202,18 @@ static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder) return 1; else return 0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: return 6; - break; default: DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return 0; @@ -2250,7 +2246,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 pll_in_use; int pll; @@ -2285,7 +2281,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint32_t cur_lock; @@ -2300,18 +2296,18 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; - tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); } static void dce_v10_0_show_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2319,17 +2315,17 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc) WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, lower_32_bits(amdgpu_crtc->cursor_addr)); - tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); } static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int xorigin = 0, yorigin = 0; amdgpu_crtc->cursor_x = x; @@ -2404,7 +2400,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -2412,7 +2408,7 @@ static int dce_v10_0_crtc_cursor_set2(struct 
drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); @@ -2447,7 +2443,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2494,12 +2490,16 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); unsigned type; @@ -2553,7 +2553,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_atom_ss ss; int i; @@ -2673,7 +2673,7 @@ static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { - return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); + return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); } static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { @@ -2685,6 +2685,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { .prepare = dce_v10_0_crtc_prepare, .commit = dce_v10_0_crtc_commit, .disable = dce_v10_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) @@ -2696,7 +2697,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) if (amdgpu_crtc == NULL) return -ENOMEM; - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); + drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; @@ -2704,8 +2705,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->max_cursor_width = 128; amdgpu_crtc->max_cursor_height = 128; - adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; switch (amdgpu_crtc->crtc_id) { case 0: @@ -2787,24 +2788,24 @@ static int dce_v10_0_sw_init(void *handle) if (r) return r; - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - adev->ddev->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.async_page_flip = true; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; - 
adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); if (r) return r; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; /* allocate crtcs */ for (i = 0; i < adev->mode_info.num_crtc; i++) { @@ -2814,7 +2815,7 @@ static int dce_v10_0_sw_init(void *handle) } if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev->ddev); + amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2827,7 +2828,7 @@ static int dce_v10_0_sw_init(void *handle) if (r) return r; - drm_kms_helper_poll_init(adev->ddev); + drm_kms_helper_poll_init(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = true; return 0; @@ -2839,13 +2840,13 @@ static int dce_v10_0_sw_fini(void *handle) kfree(adev->mode_info.bios_hardcoded_edid); - drm_kms_helper_poll_fini(adev->ddev); + drm_kms_helper_poll_fini(adev_to_drm(adev)); dce_v10_0_audio_fini(adev); dce_v10_0_afmt_fini(adev); - drm_mode_config_cleanup(adev->ddev); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; return 0; @@ -2895,6 +2896,11 @@ static int dce_v10_0_hw_fini(void *handle) static int dce_v10_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2919,8 +2925,10 @@ static int dce_v10_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v10_0_is_idle(void *handle) @@ -3152,14 +3160,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, if (amdgpu_crtc == NULL) return 0; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return 0; } @@ -3171,7 +3179,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, if (works->event) drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); @@ -3240,7 +3248,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); + drm_handle_vblank(adev_to_drm(adev), crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); @@ -3340,7 +3348,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder, static void dce_v10_0_encoder_prepare(struct drm_encoder 
*encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3380,7 +3388,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v10_0_encoder_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* need to call this here as we need the crtc set up */ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); @@ -3480,7 +3488,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, uint32_t supported_device, u16 caps) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 898ef72d423c..18a7b3bd633b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -246,6 +246,7 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @crtc_id: crtc to cleanup pageflip on * @crtc_base: new address of the crtc (GPU MC address) + * @async: asynchronous flip * * Triggers the actual pageflip by updating the primary * surface base address. @@ -346,7 +347,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -400,7 +401,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -530,7 +531,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev) static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1235,7 +1236,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1252,7 +1253,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1298,7 +1299,7 @@ static void 
dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1354,7 +1355,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1525,7 +1526,7 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev) static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1561,7 +1562,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t size) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint8_t *frame = buffer + 3; @@ -1580,7 +1581,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); @@ -1611,7 +1612,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1791,7 +1792,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1864,7 +1865,7 @@ static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 
vga_control; vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; @@ -1878,7 +1879,7 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); @@ -1892,7 +1893,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1903,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2022,8 +2022,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } @@ -2137,7 +2137,7 @@ static void dce_v11_0_set_interleave(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); u32 tmp; @@ -2153,7 +2153,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u16 *r, *g, *b; int i; u32 tmp; @@ -2235,22 +2235,18 @@ static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) return 1; else return 0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: return 6; - break; default: DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return 0; @@ -2283,7 +2279,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 pll_in_use; int pll; @@ -2304,19 +2300,16 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) return ATOM_COMBOPHY_PLL1; else return ATOM_COMBOPHY_PLL0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return ATOM_COMBOPHY_PLL3; else return ATOM_COMBOPHY_PLL2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return ATOM_COMBOPHY_PLL5; else return ATOM_COMBOPHY_PLL4; - break; default: DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return ATOM_PPLL_INVALID; @@ -2342,7 +2335,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) /* XXX need to determine what plls are available on each DCE11 part */ pll_in_use = amdgpu_pll_get_use_mask(crtc); - if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) { + if (adev->flags & AMD_IS_APU) { if (!(pll_in_use & (1 << ATOM_PPLL1))) 
return ATOM_PPLL1; if (!(pll_in_use & (1 << ATOM_PPLL0))) @@ -2364,7 +2357,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint32_t cur_lock; @@ -2379,18 +2372,18 @@ static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; - tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); } static void dce_v11_0_show_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2398,17 +2391,17 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc) WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, lower_32_bits(amdgpu_crtc->cursor_addr)); - tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); + tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); } static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int xorigin = 0, yorigin = 0; amdgpu_crtc->cursor_x = x; @@ -2483,7 +2476,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -2491,7 +2484,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); @@ -2526,7 +2519,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2573,12 +2566,16 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct 
amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); unsigned type; @@ -2632,7 +2629,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_atom_ss ss; int i; @@ -2702,7 +2699,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (!amdgpu_crtc->adjusted_clock) return -EINVAL; @@ -2781,7 +2778,7 @@ static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { - return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); + return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); } static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { @@ -2793,6 +2790,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { .prepare = dce_v11_0_crtc_prepare, .commit = dce_v11_0_crtc_commit, .disable = dce_v11_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) @@ -2804,7 +2802,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) if (amdgpu_crtc == NULL) return -ENOMEM; - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); + drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; @@ -2812,8 +2810,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->max_cursor_width = 128; amdgpu_crtc->max_cursor_height = 128; - adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; switch (amdgpu_crtc->crtc_id) { case 0: @@ -2908,24 +2906,24 @@ static int dce_v11_0_sw_init(void *handle) if (r) return r; - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - adev->ddev->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.async_page_flip = true; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); if (r) return r; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; /* allocate crtcs */ @@ -2936,7 +2934,7 @@ static int dce_v11_0_sw_init(void *handle) } if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev->ddev); + 
amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2949,7 +2947,7 @@ static int dce_v11_0_sw_init(void *handle) if (r) return r; - drm_kms_helper_poll_init(adev->ddev); + drm_kms_helper_poll_init(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = true; return 0; @@ -2961,13 +2959,13 @@ static int dce_v11_0_sw_fini(void *handle) kfree(adev->mode_info.bios_hardcoded_edid); - drm_kms_helper_poll_fini(adev->ddev); + drm_kms_helper_poll_fini(adev_to_drm(adev)); dce_v11_0_audio_fini(adev); dce_v11_0_afmt_fini(adev); - drm_mode_config_cleanup(adev->ddev); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; return 0; @@ -3028,6 +3026,11 @@ static int dce_v11_0_hw_fini(void *handle) static int dce_v11_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -3052,8 +3055,10 @@ static int dce_v11_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v11_0_is_idle(void *handle) @@ -3278,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, if(amdgpu_crtc == NULL) return 0; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return 0; } @@ -3297,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, if(works->event) drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); @@ -3367,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); + drm_handle_vblank(adev_to_drm(adev), crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); @@ -3466,7 +3471,7 @@ dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3506,7 +3511,7 @@ static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* need to call this here as we need the crtc set up */ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); @@ -3606,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, uint32_t supported_device, u16 caps) { - struct drm_device *dev = adev->ddev; 
+ struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index db15a112becc..c7803dc2b2d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -180,6 +180,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @crtc_id: crtc to cleanup pageflip on * @crtc_base: new address of the crtc (GPU MC address) + * @async: asynchronous flip * * Does the actual pageflip (evergreen+). * During vblank we take the crtc lock and wait for the update_pending @@ -279,7 +280,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -324,7 +325,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -401,7 +402,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); @@ -455,7 +456,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder) } /** - * cik_get_number_of_dram_channels - get the number of dram channels + * si_get_number_of_dram_channels - get the number of dram channels * * @adev: amdgpu_device pointer * @@ -1047,7 +1048,6 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev, /** - * * dce_v6_0_bandwidth_update - program display watermarks * * @adev: amdgpu_device pointer @@ -1114,7 +1114,7 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1130,7 +1130,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1174,7 +1174,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1235,7 +1235,7 @@ 
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1392,7 +1392,7 @@ static void dce_v6_0_audio_fini(struct amdgpu_device *adev) static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1408,7 +1408,7 @@ static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder, uint32_t clock, int bpc) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1446,7 +1446,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1488,7 +1488,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); u32 tmp; @@ -1522,7 +1522,7 @@ static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1566,7 +1566,7 @@ static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder) static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1579,7 +1579,7 @@ static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute) static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig 
= amdgpu_encoder->enc_priv; u32 tmp; @@ -1616,7 +1616,7 @@ static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable) static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1645,7 +1645,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1714,7 +1714,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1788,7 +1788,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 vga_control; vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; @@ -1799,7 +1799,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); } @@ -1810,7 +1810,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1929,8 +1928,8 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } @@ -2033,7 +2032,7 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -2048,7 +2047,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u16 *r, *g, *b; int i; @@ -2148,7 +2147,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 pll_in_use; int pll; @@ -2177,7 +2176,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc) static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint32_t cur_lock; @@ -2192,11 +2191,11 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock) static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, - (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | - (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); } @@ -2204,17 +2203,17 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) static void dce_v6_0_show_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(amdgpu_crtc->cursor_addr)); WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, lower_32_bits(amdgpu_crtc->cursor_addr)); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, - CUR_CONTROL__CURSOR_EN_MASK | - (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | - (CURSOR_URGENT_1_2 << 
CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + CUR_CONTROL__CURSOR_EN_MASK | + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); } @@ -2222,7 +2221,7 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int xorigin = 0, yorigin = 0; int w = amdgpu_crtc->cursor_width; @@ -2299,7 +2298,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -2307,7 +2306,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); @@ -2342,7 +2341,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2388,12 +2387,16 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v6_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); unsigned type; @@ -2443,7 +2446,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_atom_ss ss; int i; @@ -2563,7 +2566,7 @@ static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { - return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1); + return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1); } static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { @@ -2575,6 +2578,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { .prepare = dce_v6_0_crtc_prepare, .commit = dce_v6_0_crtc_commit, .disable = dce_v6_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) @@ -2586,7 +2590,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) if (amdgpu_crtc == NULL) return -ENOMEM; - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs); + drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs); drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; @@ -2594,8 +2598,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->max_cursor_width = CURSOR_WIDTH; 
amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT; - adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; @@ -2664,20 +2668,20 @@ static int dce_v6_0_sw_init(void *handle) adev->mode_info.mode_config_initialized = true; - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; - adev->ddev->mode_config.async_page_flip = true; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; + adev_to_drm(adev)->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); if (r) return r; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; /* allocate crtcs */ for (i = 0; i < adev->mode_info.num_crtc; i++) { @@ -2688,7 +2692,7 @@ static int dce_v6_0_sw_init(void *handle) ret = amdgpu_atombios_get_connector_info_from_object_table(adev); if (ret) - amdgpu_display_print_display_setup(adev->ddev); + amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2701,7 +2705,7 @@ static int dce_v6_0_sw_init(void *handle) if (r) return r; - drm_kms_helper_poll_init(adev->ddev); + drm_kms_helper_poll_init(adev_to_drm(adev)); return r; } @@ -2712,12 +2716,12 @@ static int dce_v6_0_sw_fini(void *handle) kfree(adev->mode_info.bios_hardcoded_edid); - drm_kms_helper_poll_fini(adev->ddev); + drm_kms_helper_poll_fini(adev_to_drm(adev)); dce_v6_0_audio_fini(adev); dce_v6_0_afmt_fini(adev); - drm_mode_config_cleanup(adev->ddev); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; return 0; @@ -2765,7 +2769,11 @@ static int dce_v6_0_hw_fini(void *handle) static int dce_v6_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2789,8 +2797,10 @@ static int dce_v6_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v6_0_is_idle(void *handle) @@ -2962,7 +2972,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); + drm_handle_vblank(adev_to_drm(adev), crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); break; @@ -3031,14 +3041,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, if (amdgpu_crtc == NULL) return 0; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + 
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return 0; } @@ -3050,7 +3060,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, if (works->event) drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); @@ -3141,7 +3151,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3182,7 +3192,7 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* need to call this here as we need the crtc set up */ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); @@ -3292,7 +3302,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev, uint32_t supported_device, u16 caps) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index f06c9022c1fd..b200b9e722d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -176,6 +176,7 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @crtc_id: crtc to cleanup pageflip on * @crtc_base: new address of the crtc (GPU MC address) + * @async: asynchronous flip * * Triggers the actual pageflip by updating the primary * surface base address. 
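[Note: the bulk of the dce_v6_0/dce_v8_0/dce_v11_0 churn in these hunks is mechanical: every adev->ddev dereference becomes adev_to_drm(adev), and every dev->dev_private load becomes drm_to_adev(dev). A minimal sketch of the accessor pair the converted code assumes is below; the real definitions live in amdgpu.h, and the embedded-ddev layout is an assumption based on the upstream driver, not something this diff shows.]

    #include <linux/kernel.h>	/* container_of() */
    #include <drm/drm_device.h>

    /* Sketch only: stand-in for the real struct in amdgpu.h. */
    struct amdgpu_device {
    	struct drm_device ddev;	/* embedded, not a pointer */
    	/* ... many driver-private fields ... */
    };

    static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
    {
    	return &adev->ddev;
    }

    static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
    {
    	return container_of(ddev, struct amdgpu_device, ddev);
    }

[Going through typed helpers instead of the untyped drm_device::dev_private pointer lets the compiler catch mismatched conversions and hides how the two structures are allocated relative to each other.]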
@@ -273,7 +274,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -318,7 +319,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -444,7 +445,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev) static void dce_v8_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1146,7 +1147,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 offset; @@ -1164,7 +1165,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1225,7 +1226,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1278,7 +1279,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 offset; @@ -1446,7 +1447,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev) static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1469,7 +1470,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t size) { struct drm_device *dev = 
encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint32_t offset = dig->afmt->offset; @@ -1489,7 +1490,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); @@ -1516,7 +1517,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1678,7 +1679,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1751,7 +1752,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 vga_control; vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; @@ -1765,7 +1766,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); @@ -1779,7 +1780,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1790,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1901,8 +1901,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } @@ -2004,7 +2004,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = 
drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -2018,7 +2018,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u16 *r, *g, *b; int i; @@ -2140,7 +2140,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 pll_in_use; int pll; @@ -2188,7 +2188,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint32_t cur_lock; @@ -2203,34 +2203,34 @@ static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) static void dce_v8_0_hide_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, - (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | - (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); } static void dce_v8_0_show_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, upper_32_bits(amdgpu_crtc->cursor_addr)); WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, lower_32_bits(amdgpu_crtc->cursor_addr)); - WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, - CUR_CONTROL__CURSOR_EN_MASK | - (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | - (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); + WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, + CUR_CONTROL__CURSOR_EN_MASK | + (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | + (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); } static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int xorigin = 0, yorigin = 0; amdgpu_crtc->cursor_x = x; @@ -2305,7 +2305,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } @@ -2313,7 +2313,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_put_unlocked(obj); + drm_gem_object_put(obj); return ret; } amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); @@ -2348,7 +2348,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - 
drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2395,12 +2395,16 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .set_config = amdgpu_display_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, .page_flip_target = amdgpu_display_crtc_page_flip_target, + .get_vblank_counter = amdgpu_get_vblank_counter_kms, + .enable_vblank = amdgpu_enable_vblank_kms, + .disable_vblank = amdgpu_disable_vblank_kms, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); unsigned type; @@ -2454,7 +2458,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_atom_ss ss; int i; @@ -2494,7 +2498,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ @@ -2581,7 +2585,7 @@ static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) { - return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); + return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); } static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { @@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { .prepare = dce_v8_0_crtc_prepare, .commit = dce_v8_0_crtc_commit, .disable = dce_v8_0_crtc_disable, + .get_scanout_position = amdgpu_crtc_get_scanout_position, }; static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) @@ -2604,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) if (amdgpu_crtc == NULL) return -ENOMEM; - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); + drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; @@ -2612,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; - adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; @@ -2684,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle) if (r) return r; - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - adev->ddev->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.async_page_flip = true; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + 
adev_to_drm(adev)->mode_config.max_height = 16384; - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); if (r) return r; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; /* allocate crtcs */ for (i = 0; i < adev->mode_info.num_crtc; i++) { @@ -2711,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle) } if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev->ddev); + amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2724,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle) if (r) return r; - drm_kms_helper_poll_init(adev->ddev); + drm_kms_helper_poll_init(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = true; return 0; @@ -2736,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle) kfree(adev->mode_info.bios_hardcoded_edid); - drm_kms_helper_poll_fini(adev->ddev); + drm_kms_helper_poll_fini(adev_to_drm(adev)); dce_v8_0_audio_fini(adev); dce_v8_0_afmt_fini(adev); - drm_mode_config_cleanup(adev->ddev); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; return 0; @@ -2790,6 +2795,11 @@ static int dce_v8_0_hw_fini(void *handle) static int dce_v8_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_display_suspend_helper(adev); + if (r) + return r; adev->mode_info.bl_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); @@ -2814,8 +2824,10 @@ static int dce_v8_0_resume(void *handle) amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, bl_level); } + if (ret) + return ret; - return ret; + return amdgpu_display_resume_helper(adev); } static bool dce_v8_0_is_idle(void *handle) @@ -3052,7 +3064,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); + drm_handle_vblank(adev_to_drm(adev), crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); break; @@ -3121,14 +3133,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, if (amdgpu_crtc == NULL) return 0; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return 0; } @@ -3140,7 +3152,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, if (works->event) drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); @@ -3228,7 +3240,7 @@ dce_v8_0_encoder_mode_set(struct drm_encoder *encoder, static void 
dce_v8_0_encoder_prepare(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3268,7 +3280,7 @@ static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v8_0_encoder_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* need to call this here as we need the crtc set up */ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); @@ -3368,7 +3380,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, uint32_t supported_device, u16 caps) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c deleted file mode 100644 index e4f94863332c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ /dev/null @@ -1,761 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
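[Note: dce_virtual.c, the timer-driven virtual display used for SR-IOV and headless configurations, is deleted outright below; it is presumably superseded by a virtual-KMS implementation added elsewhere in this diff (the Makefile excerpt earlier is cut before the display objects, so treat that as an assumption). The core trick in the removed file is emulating vblank with an hrtimer. A self-contained sketch of that mechanism follows; all names are hypothetical except the 16666666 ns (~60 Hz) period, which the deleted file defines as DCE_VIRTUAL_VBLANK_PERIOD.]

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <drm/drm_crtc.h>
    #include <drm/drm_vblank.h>

    #define FAKE_VBLANK_PERIOD_NS 16666666	/* ~60 Hz, as in the deleted file */

    struct fake_crtc {			/* hypothetical container */
    	struct drm_crtc base;
    	struct hrtimer vblank_timer;
    };

    static enum hrtimer_restart fake_vblank_handler(struct hrtimer *t)
    {
    	struct fake_crtc *fc = container_of(t, struct fake_crtc, vblank_timer);

    	/* Signal vblank exactly as a real scanout IRQ would. */
    	drm_crtc_handle_vblank(&fc->base);

    	/* Re-arm for the next frame. */
    	hrtimer_forward_now(t, ns_to_ktime(FAKE_VBLANK_PERIOD_NS));
    	return HRTIMER_RESTART;
    }

[The removed handler, visible further down, re-arms by calling hrtimer_start() again and returning HRTIMER_NORESTART, which is equivalent; hrtimer_forward_now() with HRTIMER_RESTART is just the more common idiom, and drm_crtc_handle_vblank() is the per-CRTC form of the drm_handle_vblank() call it uses.]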
- * - */ - -#include <drm/drm_vblank.h> - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_i2c.h" -#include "atom.h" -#include "amdgpu_pll.h" -#include "amdgpu_connectors.h" -#ifdef CONFIG_DRM_AMDGPU_SI -#include "dce_v6_0.h" -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK -#include "dce_v8_0.h" -#endif -#include "dce_v10_0.h" -#include "dce_v11_0.h" -#include "dce_virtual.h" -#include "ivsrcid/ivsrcid_vislands30.h" - -#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 - - -static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); -static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); -static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, - int index); -static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state); - -static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) -{ - return 0; -} - -static void dce_virtual_page_flip(struct amdgpu_device *adev, - int crtc_id, u64 crtc_base, bool async) -{ - return; -} - -static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, - u32 *vbl, u32 *position) -{ - *vbl = 0; - *position = 0; - - return -EINVAL; -} - -static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - return true; -} - -static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev, - enum amdgpu_hpd_id hpd) -{ - return; -} - -static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) -{ - return 0; -} - -/** - * dce_virtual_bandwidth_update - program display watermarks - * - * @adev: amdgpu_device pointer - * - * Calculate and program the display watermarks and line - * buffer allocation (CIK). - */ -static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) -{ - return; -} - -static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, - u16 *green, u16 *blue, uint32_t size, - struct drm_modeset_acquire_ctx *ctx) -{ - return 0; -} - -static void dce_virtual_crtc_destroy(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - drm_crtc_cleanup(crtc); - kfree(amdgpu_crtc); -} - -static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { - .cursor_set2 = NULL, - .cursor_move = NULL, - .gamma_set = dce_virtual_crtc_gamma_set, - .set_config = amdgpu_display_crtc_set_config, - .destroy = dce_virtual_crtc_destroy, - .page_flip_target = amdgpu_display_crtc_page_flip_target, -}; - -static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - unsigned type; - - if (amdgpu_sriov_vf(adev)) - return; - - switch (mode) { - case DRM_MODE_DPMS_ON: - amdgpu_crtc->enabled = true; - /* Make sure VBLANK interrupts are still enabled */ - type = amdgpu_display_crtc_idx_to_irq_type(adev, - amdgpu_crtc->crtc_id); - amdgpu_irq_update(adev, &adev->crtc_irq, type); - drm_crtc_vblank_on(crtc); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - drm_crtc_vblank_off(crtc); - amdgpu_crtc->enabled = false; - break; - } -} - - -static void dce_virtual_crtc_prepare(struct drm_crtc *crtc) -{ - dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void dce_virtual_crtc_commit(struct drm_crtc *crtc) -{ - dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON); -} - -static void dce_virtual_crtc_disable(struct drm_crtc *crtc) -{ - struct amdgpu_crtc *amdgpu_crtc 
= to_amdgpu_crtc(crtc); - - dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; -} - -static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - /* update the hw version fpr dpm */ - amdgpu_crtc->hw_mode = *adjusted_mode; - - return 0; -} - -static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - - -static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - return 0; -} - -static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, enum mode_set_atomic state) -{ - return 0; -} - -static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { - .dpms = dce_virtual_crtc_dpms, - .mode_fixup = dce_virtual_crtc_mode_fixup, - .mode_set = dce_virtual_crtc_mode_set, - .mode_set_base = dce_virtual_crtc_set_base, - .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, - .prepare = dce_virtual_crtc_prepare, - .commit = dce_virtual_crtc_commit, - .disable = dce_virtual_crtc_disable, -}; - -static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) -{ - struct amdgpu_crtc *amdgpu_crtc; - - amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + - (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); - if (amdgpu_crtc == NULL) - return -ENOMEM; - - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs); - - drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); - amdgpu_crtc->crtc_id = index; - adev->mode_info.crtcs[index] = amdgpu_crtc; - - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; - amdgpu_crtc->encoder = NULL; - amdgpu_crtc->connector = NULL; - amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; - drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); - - return 0; -} - -static int dce_virtual_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dce_virtual_set_display_funcs(adev); - dce_virtual_set_irq_funcs(adev); - - adev->mode_info.num_hpd = 1; - adev->mode_info.num_dig = 1; - return 0; -} - -static struct drm_encoder * -dce_virtual_encoder(struct drm_connector *connector) -{ - struct drm_encoder *encoder; - - drm_connector_for_each_possible_encoder(connector, encoder) { - if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) - return encoder; - } - - /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder) - return encoder; - - return NULL; -} - -static int dce_virtual_get_modes(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_display_mode *mode = NULL; - unsigned i; - static const struct mode_size { - int w; - int h; - } common_modes[17] = { - { 640, 480}, - { 720, 480}, - { 800, 600}, - { 848, 480}, - {1024, 768}, - {1152, 768}, - {1280, 720}, - {1280, 800}, - {1280, 854}, - {1280, 960}, - {1280, 1024}, - {1440, 900}, - {1400, 1050}, - {1680, 1050}, - {1600, 1200}, - {1920, 1080}, - {1920, 1200} - }; - - for (i = 0; i < 17; i++) { - mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); - drm_mode_probed_add(connector, mode); - } - - return 0; -} - -static enum 
drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - -static int -dce_virtual_dpms(struct drm_connector *connector, int mode) -{ - return 0; -} - -static int -dce_virtual_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val) -{ - return 0; -} - -static void dce_virtual_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(connector); -} - -static void dce_virtual_force(struct drm_connector *connector) -{ - return; -} - -static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = { - .get_modes = dce_virtual_get_modes, - .mode_valid = dce_virtual_mode_valid, - .best_encoder = dce_virtual_encoder, -}; - -static const struct drm_connector_funcs dce_virtual_connector_funcs = { - .dpms = dce_virtual_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .set_property = dce_virtual_set_property, - .destroy = dce_virtual_destroy, - .force = dce_virtual_force, -}; - -static int dce_virtual_sw_init(void *handle) -{ - int r, i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq); - if (r) - return r; - - adev->ddev->max_vblank_count = 0; - - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; - - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; - - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; - - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; - - r = amdgpu_display_modeset_create_props(adev); - if (r) - return r; - - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; - - /* allocate crtcs, encoders, connectors */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = dce_virtual_crtc_init(adev, i); - if (r) - return r; - r = dce_virtual_connector_encoder_init(adev, i); - if (r) - return r; - } - - drm_kms_helper_poll_init(adev->ddev); - - adev->mode_info.mode_config_initialized = true; - return 0; -} - -static int dce_virtual_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - kfree(adev->mode_info.bios_hardcoded_edid); - - drm_kms_helper_poll_fini(adev->ddev); - - drm_mode_config_cleanup(adev->ddev); - /* clear crtcs pointer to avoid dce irq finish routine access freed data */ - memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS); - adev->mode_info.mode_config_initialized = false; - return 0; -} - -static int dce_virtual_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - dce_v6_0_disable_dce(adev); - break; -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KAVERI: - case CHIP_KABINI: - case CHIP_MULLINS: - dce_v8_0_disable_dce(adev); - break; -#endif - case CHIP_FIJI: - case CHIP_TONGA: - dce_v10_0_disable_dce(adev); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_VEGAM: - dce_v11_0_disable_dce(adev); - break; - case CHIP_TOPAZ: -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_HAINAN: -#endif - /* no DCE */ - break; - default: - break; - } - return 0; -} - -static int 
dce_virtual_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i = 0; - - for (i = 0; i<adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE); - - return 0; -} - -static int dce_virtual_suspend(void *handle) -{ - return dce_virtual_hw_fini(handle); -} - -static int dce_virtual_resume(void *handle) -{ - return dce_virtual_hw_init(handle); -} - -static bool dce_virtual_is_idle(void *handle) -{ - return true; -} - -static int dce_virtual_wait_for_idle(void *handle) -{ - return 0; -} - -static int dce_virtual_soft_reset(void *handle) -{ - return 0; -} - -static int dce_virtual_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int dce_virtual_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -static const struct amd_ip_funcs dce_virtual_ip_funcs = { - .name = "dce_virtual", - .early_init = dce_virtual_early_init, - .late_init = NULL, - .sw_init = dce_virtual_sw_init, - .sw_fini = dce_virtual_sw_fini, - .hw_init = dce_virtual_hw_init, - .hw_fini = dce_virtual_hw_fini, - .suspend = dce_virtual_suspend, - .resume = dce_virtual_resume, - .is_idle = dce_virtual_is_idle, - .wait_for_idle = dce_virtual_wait_for_idle, - .soft_reset = dce_virtual_soft_reset, - .set_clockgating_state = dce_virtual_set_clockgating_state, - .set_powergating_state = dce_virtual_set_powergating_state, -}; - -/* these are handled by the primary encoders */ -static void dce_virtual_encoder_prepare(struct drm_encoder *encoder) -{ - return; -} - -static void dce_virtual_encoder_commit(struct drm_encoder *encoder) -{ - return; -} - -static void -dce_virtual_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return; -} - -static void dce_virtual_encoder_disable(struct drm_encoder *encoder) -{ - return; -} - -static void -dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - return; -} - -static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = { - .dpms = dce_virtual_encoder_dpms, - .mode_fixup = dce_virtual_encoder_mode_fixup, - .prepare = dce_virtual_encoder_prepare, - .mode_set = dce_virtual_encoder_mode_set, - .commit = dce_virtual_encoder_commit, - .disable = dce_virtual_encoder_disable, -}; - -static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); - kfree(encoder); -} - -static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { - .destroy = dce_virtual_encoder_destroy, -}; - -static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, - int index) -{ - struct drm_encoder *encoder; - struct drm_connector *connector; - - /* add a new encoder */ - encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL); - if (!encoder) - return -ENOMEM; - encoder->possible_crtcs = 1 << index; - drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL, NULL); - drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); - - connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); - if (!connector) { - kfree(encoder); - return -ENOMEM; - } - - /* add a new connector */ - drm_connector_init(adev->ddev, connector, 
&dce_virtual_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs); - connector->display_info.subpixel_order = SubPixelHorizontalRGB; - connector->interlace_allowed = false; - connector->doublescan_allowed = false; - drm_connector_register(connector); - - /* link them */ - drm_connector_attach_encoder(connector, encoder); - - return 0; -} - -static const struct amdgpu_display_funcs dce_virtual_display_funcs = { - .bandwidth_update = &dce_virtual_bandwidth_update, - .vblank_get_counter = &dce_virtual_vblank_get_counter, - .backlight_set_level = NULL, - .backlight_get_level = NULL, - .hpd_sense = &dce_virtual_hpd_sense, - .hpd_set_polarity = &dce_virtual_hpd_set_polarity, - .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, - .page_flip = &dce_virtual_page_flip, - .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, - .add_encoder = NULL, - .add_connector = NULL, -}; - -static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) -{ - adev->mode_info.funcs = &dce_virtual_display_funcs; -} - -static int dce_virtual_pageflip(struct amdgpu_device *adev, - unsigned crtc_id) -{ - unsigned long flags; - struct amdgpu_crtc *amdgpu_crtc; - struct amdgpu_flip_work *works; - - amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; - - if (crtc_id >= adev->mode_info.num_crtc) { - DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); - return -EINVAL; - } - - /* IRQ could occur when in initial stage */ - if (amdgpu_crtc == NULL) - return 0; - - spin_lock_irqsave(&adev->ddev->event_lock, flags); - works = amdgpu_crtc->pflip_works; - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { - DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " - "AMDGPU_FLIP_SUBMITTED(%d)\n", - amdgpu_crtc->pflip_status, - AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - return 0; - } - - /* page flip completed. 
clean up */ - amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; - amdgpu_crtc->pflip_works = NULL; - - /* wakeup userspace */ - if (works->event) - drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - - drm_crtc_vblank_put(&amdgpu_crtc->base); - amdgpu_bo_unref(&works->old_abo); - kfree(works->shared); - kfree(works); - - return 0; -} - -static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) -{ - struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer, - struct amdgpu_crtc, vblank_timer); - struct drm_device *ddev = amdgpu_crtc->base.dev; - struct amdgpu_device *adev = ddev->dev_private; - - drm_handle_vblank(ddev, amdgpu_crtc->crtc_id); - dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id); - hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD, - HRTIMER_MODE_REL); - - return HRTIMER_NORESTART; -} - -static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, - int crtc, - enum amdgpu_interrupt_state state) -{ - if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) { - DRM_DEBUG("invalid crtc %d\n", crtc); - return; - } - - if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { - DRM_DEBUG("Enable software vsync timer\n"); - hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer, - DCE_VIRTUAL_VBLANK_PERIOD); - adev->mode_info.crtcs[crtc]->vblank_timer.function = - dce_virtual_vblank_timer_handle; - hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer, - DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL); - } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { - DRM_DEBUG("Disable software vsync timer\n"); - hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer); - } - - adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state; - DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); -} - - -static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - if (type > AMDGPU_CRTC_IRQ_VBLANK6) - return -EINVAL; - - dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state); - - return 0; -} - -static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { - .set = dce_virtual_set_crtc_irq_state, - .process = NULL, -}; - -static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) -{ - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; - adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; -} - -const struct amdgpu_ip_block_version dce_virtual_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_DCE, - .major = 1, - .minor = 0, - .rev = 0, - .funcs = &dce_virtual_ip_funcs, -};
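The dce_virtual code removed above emulates vblank entirely in software: each CRTC owns an hrtimer that fires once per DCE_VIRTUAL_VBLANK_PERIOD, raises the DRM vblank event, completes any pending page flip, and re-arms itself. A compile-only sketch of that self-rearming pattern, assuming module context; the fake_crtc type and fake_* names are illustrative, not driver API:

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/printk.h>

/* stand-in for DCE_VIRTUAL_VBLANK_PERIOD, which lives in the real driver */
#define FAKE_VBLANK_PERIOD ktime_set(0, 16666666)	/* ~60 Hz */

struct fake_crtc {
	struct hrtimer vblank_timer;
	int crtc_id;
};

static enum hrtimer_restart fake_vblank_handle(struct hrtimer *timer)
{
	struct fake_crtc *fc = container_of(timer, struct fake_crtc, vblank_timer);

	pr_debug("software vblank on crtc %d\n", fc->crtc_id);
	/* deliver the vblank event and finish pending flips here */

	/* re-arm by hand and tell the hrtimer core not to restart the timer itself */
	hrtimer_start(timer, FAKE_VBLANK_PERIOD, HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void fake_vblank_enable(struct fake_crtc *fc)
{
	hrtimer_init(&fc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	fc->vblank_timer.function = fake_vblank_handle;
	hrtimer_start(&fc->vblank_timer, FAKE_VBLANK_PERIOD, HRTIMER_MODE_REL);
}

Re-arming relative to "now" is the simplest form and is what the removed code does; an hrtimer_forward_now() plus HRTIMER_RESTART variant would avoid the slow drift that per-handler re-arming accumulates.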
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index d6221298b477..2d01ac0d4c11 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -31,6 +31,9 @@ static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; static void df_v1_7_sw_init(struct amdgpu_device *adev) { + adev->df.hash_status.hash_64k = false; + adev->df.hash_status.hash_2m = false; + adev->df.hash_status.hash_1g = false; } static void df_v1_7_sw_fini(struct amdgpu_device *adev) @@ -38,7 +41,7 @@ static void df_v1_7_sw_fini(struct amdgpu_device *adev) { } static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev, - bool enable) + bool enable) { u32 tmp; @@ -66,7 +69,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev) { int fb_channel_number; - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); return df_v1_7_channel_number[fb_channel_number]; } @@ -77,7 +80,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, u32 tmp; /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); + adev->df.funcs->enable_broadcast_mode(adev, true); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); @@ -92,7 +95,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, } /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + adev->df.funcs->enable_broadcast_mode(adev, false); } static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 4043ebcea5de..43c5e3ec9a39 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -27,71 +27,20 @@ #include "df/df_3_6_offset.h" #include "df/df_3_6_sh_mask.h" -static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, - 16, 32, 0, 0, 0, 2, 4, 8}; - -/* init df format attrs */ -AMDGPU_PMU_ATTR(event, "config:0-7"); -AMDGPU_PMU_ATTR(instance, "config:8-15"); -AMDGPU_PMU_ATTR(umask, "config:16-23"); - -/* df format attributes */ -static struct attribute *df_v3_6_format_attrs[] = { - &pmu_attr_event.attr, - &pmu_attr_instance.attr, - &pmu_attr_umask.attr, - NULL -}; - -/* df format attribute group */ -static struct attribute_group df_v3_6_format_attr_group = { - .name = "format", - .attrs = df_v3_6_format_attrs, -}; +#define DF_3_6_SMN_REG_INST_DIST 0x8 +#define DF_3_6_INST_CNT 8 -/* df event attrs */ -AMDGPU_PMU_ATTR(cake0_pcsout_txdata, - "event=0x7,instance=0x46,umask=0x2"); -AMDGPU_PMU_ATTR(cake1_pcsout_txdata, - "event=0x7,instance=0x47,umask=0x2"); -AMDGPU_PMU_ATTR(cake0_pcsout_txmeta, - "event=0x7,instance=0x46,umask=0x4"); -AMDGPU_PMU_ATTR(cake1_pcsout_txmeta, - "event=0x7,instance=0x47,umask=0x4"); -AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc, - "event=0xb,instance=0x46,umask=0x4"); -AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc, - "event=0xb,instance=0x47,umask=0x4"); -AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc, - "event=0xb,instance=0x46,umask=0x8"); -AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc, - "event=0xb,instance=0x47,umask=0x8"); - -/* df event attributes */ -static struct attribute *df_v3_6_event_attrs[] = { - &pmu_attr_cake0_pcsout_txdata.attr, - &pmu_attr_cake1_pcsout_txdata.attr, - &pmu_attr_cake0_pcsout_txmeta.attr, - &pmu_attr_cake1_pcsout_txmeta.attr, - &pmu_attr_cake0_ftiinstat_reqalloc.attr, - &pmu_attr_cake1_ftiinstat_reqalloc.attr, - &pmu_attr_cake0_ftiinstat_rspalloc.attr, - &pmu_attr_cake1_ftiinstat_rspalloc.attr, - NULL -}; +/* Defined in global_features.h as FTI_PERFMON_VISIBLE */ +#define DF_V3_6_MAX_COUNTERS 4 -/* df event attribute group */ -static struct attribute_group df_v3_6_event_attr_group = { - .name = "events", - .attrs = df_v3_6_event_attrs }; +/* get flags from df perfmon config */ +#define DF_V3_6_GET_EVENT(x) (x & 0xFFUL) +#define DF_V3_6_GET_INSTANCE(x) ((x >> 8) & 0xFFUL) +#define DF_V3_6_GET_UNITMASK(x) ((x >> 16) & 0xFFUL) +#define DF_V3_6_PERFMON_OVERFLOW 0xFFFFFFFFFFFFULL -/* df event attr groups */ -const struct attribute_group *df_v3_6_attr_groups[] = { -
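The sysfs format attributes removed here (config:0-7, config:8-15, config:16-23) and the DF_V3_6_GET_* macros moving in from df_v3_6.h describe the same perfmon config word: event select in bits 0-7, instance in bits 8-15, unit mask in bits 16-23. A standalone decode of the packing that the removed cake0_pcsout_txdata attribute spelled as event=0x7,instance=0x46,umask=0x2 (macros copied from the patch; the main() harness is illustrative):

#include <stdio.h>

#define DF_V3_6_GET_EVENT(x) (x & 0xFFUL)
#define DF_V3_6_GET_INSTANCE(x) ((x >> 8) & 0xFFUL)
#define DF_V3_6_GET_UNITMASK(x) ((x >> 16) & 0xFFUL)

int main(void)
{
	/* event=0x7, instance=0x46, umask=0x2 packs to config 0x024607 */
	unsigned long config = 0x7UL | (0x46UL << 8) | (0x2UL << 16);

	printf("config=0x%lx event=0x%lx instance=0x%lx umask=0x%lx\n",
	       config,
	       DF_V3_6_GET_EVENT(config),
	       DF_V3_6_GET_INSTANCE(config),
	       DF_V3_6_GET_UNITMASK(config));
	return 0;
}

Further down in this patch, df_v3_6_pmc_get_ctrl_settings() repacks these same fields into the PerfMonCtl lo/hi register pair, with bit 22 of the low word serving as the enable bit.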
&df_v3_6_format_attr_group, - &df_v3_6_event_attr_group, - NULL -}; +static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, + 16, 32, 0, 0, 0, 2, 4, 8}; static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev, uint32_t ficaa_val) @@ -183,6 +132,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +/* same as perfmon_wreg but return status on write value check */ +static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev, + uint32_t lo_addr, uint32_t lo_val, + uint32_t hi_addr, uint32_t hi_val) +{ + unsigned long flags, address, data; + uint32_t lo_val_rb, hi_val_rb; + + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, lo_addr); + WREG32(data, lo_val); + WREG32(address, hi_addr); + WREG32(data, hi_val); + + WREG32(address, lo_addr); + lo_val_rb = RREG32(data); + WREG32(address, hi_addr); + hi_val_rb = RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + if (!(lo_val == lo_val_rb && hi_val == hi_val_rb)) + return -EBUSY; + + return 0; +} + + +/* + * retry arming counters every 100 usecs within 1 millisecond interval. + * if retry fails after time out, return error. + */ +#define ARM_RETRY_USEC_TIMEOUT 1000 +#define ARM_RETRY_USEC_INTERVAL 100 +static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev, + uint32_t lo_addr, uint32_t lo_val, + uint32_t hi_addr, uint32_t hi_val) +{ + int countdown = ARM_RETRY_USEC_TIMEOUT; + + while (countdown) { + + if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val, + hi_addr, hi_val)) + break; + + countdown -= ARM_RETRY_USEC_INTERVAL; + udelay(ARM_RETRY_USEC_INTERVAL); + } + + return countdown > 0 ? 
0 : -ETIME; +} + /* get the number of df counters available */ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, struct device_attribute *attr, @@ -193,7 +197,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, int i, count; ddev = dev_get_drvdata(dev); - adev = ddev->dev_private; + adev = drm_to_adev(ddev); count = 0; for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) { @@ -201,12 +205,38 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, count++; } - return snprintf(buf, PAGE_SIZE, "%i\n", count); + return sysfs_emit(buf, "%i\n", count); } /* device attr for available perfmon counters */ static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL); +static void df_v3_6_query_hashes(struct amdgpu_device *adev) +{ + u32 tmp; + + adev->df.hash_status.hash_64k = false; + adev->df.hash_status.hash_2m = false; + adev->df.hash_status.hash_1g = false; + + /* encoding for hash-enabled on Arcturus and Aldebaran */ + if ((adev->asic_type == CHIP_ARCTURUS && + adev->df.funcs->get_fb_channel_number(adev) == 0xe) || + (adev->asic_type == CHIP_ALDEBARAN && + adev->df.funcs->get_fb_channel_number(adev) == 0x1e)) { + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl); + adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl64K); + adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl2M); + adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp, + DF_CS_UMC_AON0_DfGlobalCtrl, + GlbHashIntlvCtl1G); + } +} + /* init perfmons */ static void df_v3_6_sw_init(struct amdgpu_device *adev) { @@ -218,6 +248,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++) adev->df_perfmon_config_assign_mask[i] = 0; + + df_v3_6_query_hashes(adev); } static void df_v3_6_sw_fini(struct amdgpu_device *adev) @@ -245,8 +277,14 @@ static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev) { u32 tmp; - tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); - tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + if (adev->asic_type == CHIP_ALDEBARAN) { + tmp = RREG32_SOC15(DF, 0, mmDF_GCM_AON0_DramMegaBaseAddress0); + tmp &= + ALDEBARAN_DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + } else { + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); + tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + } tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; return tmp; @@ -256,7 +294,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) { int fb_channel_number; - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number)) fb_channel_number = 0; @@ -270,7 +308,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); + adev->df.funcs->enable_broadcast_mode(adev, true); if (enable) { tmp = RREG32_SOC15(DF, 0, @@ -289,7 +327,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, } /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + adev->df.funcs->enable_broadcast_mode(adev, false); } } @@ -305,49 +343,44 @@ static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, } /* get assigned df perfmon ctr as int */ -static int 
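df_v3_6_perfmon_arm_with_status() above is perfmon_wreg() plus verification: it writes the lo/hi control pair through the PCIe index/data registers, reads both values back under the same lock, and returns -EBUSY if either failed to stick; df_v3_6_perfmon_arm_with_retry() then repeats the attempt every 100 usecs for up to 1 msec. A self-contained sketch of that write-verify-retry shape with the register bus stubbed out as an array (all names here are illustrative; the real code does locked MMIO and udelay()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARM_RETRY_USEC_TIMEOUT 1000
#define ARM_RETRY_USEC_INTERVAL 100

/* stand-in register file; the driver goes through an SMN index/data pair */
static uint32_t regs[2];

static void wreg(int addr, uint32_t val) { regs[addr] = val; }
static uint32_t rreg(int addr) { return regs[addr]; }
static void delay_us(unsigned int us) { (void)us; /* udelay() in the kernel */ }

/* write lo/hi, then read back and compare: true means the write stuck */
static bool arm_with_status(uint32_t lo, uint32_t hi)
{
	wreg(0, lo);
	wreg(1, hi);
	return rreg(0) == lo && rreg(1) == hi;
}

static int arm_with_retry(uint32_t lo, uint32_t hi)
{
	int countdown = ARM_RETRY_USEC_TIMEOUT;

	while (countdown) {
		if (arm_with_status(lo, hi))
			return 0;		/* armed */
		countdown -= ARM_RETRY_USEC_INTERVAL;
		delay_us(ARM_RETRY_USEC_INTERVAL);
	}
	return -1;			/* timed out */
}

int main(void)
{
	/* event 0x7 with the enable bit (1 << 22) set, as pmc_get_ctrl_settings builds it */
	printf("arm result: %d\n", arm_with_retry(0x400007, 0x11));
	return 0;
}

A timeout is not fatal in the driver: pmc_start() marks the counter with DEFERRED_ARM_MASK, and pmc_get_count() retries the arm once on the next read, throwing the sample away if it still will not take.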
df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev, - uint64_t config) +static bool df_v3_6_pmc_has_counter(struct amdgpu_device *adev, + uint64_t config, + int counter_idx) { - int i; - for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) { - if ((config & 0x0FFFFFFUL) == - adev->df_perfmon_config_assign_mask[i]) - return i; - } + return ((config & 0x0FFFFFFUL) == + adev->df_perfmon_config_assign_mask[counter_idx]); - return -EINVAL; } /* get address based on counter assignment */ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev, uint64_t config, + int counter_idx, int is_ctrl, uint32_t *lo_base_addr, uint32_t *hi_base_addr) { - int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); - - if (target_cntr < 0) + if (!df_v3_6_pmc_has_counter(adev, config, counter_idx)) return; - switch (target_cntr) { + switch (counter_idx) { case 0: - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0; - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0; + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4; + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4; break; case 1: - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1; - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1; + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5; + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5; break; case 2: - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2; - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2; + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6; + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6; break; case 3: - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3; - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3; + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7; + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7; break; } @@ -357,25 +390,30 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev, /* get read counter address */ static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev, uint64_t config, + int counter_idx, uint32_t *lo_base_addr, uint32_t *hi_base_addr) { - df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr); + df_v3_6_pmc_get_addr(adev, config, counter_idx, 0, lo_base_addr, + hi_base_addr); } /* get control counter settings i.e. address and values to set */ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev, uint64_t config, + int counter_idx, uint32_t *lo_base_addr, uint32_t *hi_base_addr, uint32_t *lo_val, - uint32_t *hi_val) + uint32_t *hi_val, + bool is_enable) { uint32_t eventsel, instance, unitmask; uint32_t instance_10, instance_5432, instance_76; - df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr); + df_v3_6_pmc_get_addr(adev, config, counter_idx, 1, lo_base_addr, + hi_base_addr); if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) { DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x", @@ -391,7 +429,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev, instance_5432 = (instance >> 2) & 0xf; instance_76 = (instance >> 6) & 0x3; - *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22); + *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel; + *lo_val = is_enable ? 
*lo_val | (1 << 22) : *lo_val & ~(1 << 22); *hi_val = (instance_76 << 29) | instance_5432; DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x", @@ -404,41 +443,65 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev, static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev, uint64_t config) { - int i, target_cntr; - - target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); - - if (target_cntr >= 0) - return 0; + int i; for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) { if (adev->df_perfmon_config_assign_mask[i] == 0U) { adev->df_perfmon_config_assign_mask[i] = config & 0x0FFFFFFUL; - return 0; + return i; } } return -ENOSPC; } +#define DEFERRED_ARM_MASK (1 << 31) +static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev, + uint64_t config, int counter_idx, + bool is_deferred) +{ + + if (!df_v3_6_pmc_has_counter(adev, config, counter_idx)) + return -EINVAL; + + if (is_deferred) + adev->df_perfmon_config_assign_mask[counter_idx] |= + DEFERRED_ARM_MASK; + else + adev->df_perfmon_config_assign_mask[counter_idx] &= + ~DEFERRED_ARM_MASK; + + return 0; +} + +static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev, + uint64_t config, + int counter_idx) +{ + return (df_v3_6_pmc_has_counter(adev, config, counter_idx) && + (adev->df_perfmon_config_assign_mask[counter_idx] + & DEFERRED_ARM_MASK)); + +} + /* release performance counter */ static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev, - uint64_t config) + uint64_t config, + int counter_idx) { - int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); - - if (target_cntr >= 0) - adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL; + if (df_v3_6_pmc_has_counter(adev, config, counter_idx)) + adev->df_perfmon_config_assign_mask[counter_idx] = 0ULL; } static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev, - uint64_t config) + uint64_t config, + int counter_idx) { - uint32_t lo_base_addr, hi_base_addr; + uint32_t lo_base_addr = 0, hi_base_addr = 0; - df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr, + df_v3_6_pmc_get_read_settings(adev, config, counter_idx, &lo_base_addr, &hi_base_addr); if ((lo_base_addr == 0) || (hi_base_addr == 0)) @@ -447,33 +510,40 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev, df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); } +/* return an available counter index if is_add == 1; otherwise return an error status. 
*/ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, - int is_enable) + int counter_idx, int is_add) { uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; - int ret = 0; + int err = 0, ret = 0; switch (adev->asic_type) { case CHIP_VEGA20: + case CHIP_ARCTURUS: + if (is_add) + return df_v3_6_pmc_add_cntr(adev, config); - df_v3_6_reset_perfmon_cntr(adev, config); - - if (is_enable) { - ret = df_v3_6_pmc_add_cntr(adev, config); - } else { - ret = df_v3_6_pmc_get_ctrl_settings(adev, + ret = df_v3_6_pmc_get_ctrl_settings(adev, config, + counter_idx, &lo_base_addr, &hi_base_addr, &lo_val, - &hi_val); + &hi_val, + true); - if (ret) - return ret; + if (ret) + return ret; - df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, - hi_base_addr, hi_val); - } + err = df_v3_6_perfmon_arm_with_retry(adev, + lo_base_addr, + lo_val, + hi_base_addr, + hi_val); + + if (err) + ret = df_v3_6_pmc_set_deferred(adev, config, + counter_idx, true); break; default: @@ -484,27 +554,33 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, } static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, - int is_disable) + int counter_idx, int is_remove) { uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; int ret = 0; switch (adev->asic_type) { case CHIP_VEGA20: + case CHIP_ARCTURUS: ret = df_v3_6_pmc_get_ctrl_settings(adev, config, + counter_idx, &lo_base_addr, &hi_base_addr, &lo_val, - &hi_val); + &hi_val, + false); if (ret) return ret; - df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); + df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, + hi_base_addr, hi_val); - if (is_disable) - df_v3_6_pmc_release_cntr(adev, config); + if (is_remove) { + df_v3_6_reset_perfmon_cntr(adev, config, counter_idx); + df_v3_6_pmc_release_cntr(adev, config, counter_idx); + } break; default: @@ -516,20 +592,34 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, uint64_t config, + int counter_idx, uint64_t *count) { - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; + uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0; *count = 0; switch (adev->asic_type) { case CHIP_VEGA20: - - df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr, - &hi_base_addr); + case CHIP_ARCTURUS: + df_v3_6_pmc_get_read_settings(adev, config, counter_idx, + &lo_base_addr, &hi_base_addr); if ((lo_base_addr == 0) || (hi_base_addr == 0)) return; + /* rearm the counter or throw away count value on failure */ + if (df_v3_6_pmc_is_deferred(adev, config, counter_idx)) { + int rearm_err = df_v3_6_perfmon_arm_with_status(adev, + lo_base_addr, lo_val, + hi_base_addr, hi_val); + + if (rearm_err) + return; + + df_v3_6_pmc_set_deferred(adev, config, counter_idx, + false); + } + df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val, hi_base_addr, &hi_val); @@ -542,12 +632,41 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, config, lo_base_addr, hi_base_addr, lo_val, hi_val); break; - default: break; } } +static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev) +{ + uint32_t hw_assert_msklo, hw_assert_mskhi; + uint32_t v0, v1, v28, v31; + + hw_assert_msklo = RREG32_SOC15(DF, 0, + mmDF_CS_UMC_AON0_HardwareAssertMaskLow); + hw_assert_mskhi = RREG32_SOC15(DF, 0, + mmDF_NCS_PG0_HardwareAssertMaskHigh); + + v0 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0); + v1 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, 
HWAssertMsk1); + v28 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28); + v31 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31); + + if (v0 && v1 && v28 && v31) + return true; + else if (!v0 && !v1 && !v28 && !v31) + return false; + else { + dev_warn(adev->dev, "DF poison setting is inconsistent (%d:%d:%d:%d)!\n", + v0, v1, v28, v31); + return false; + } +} + const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, .sw_fini = df_v3_6_sw_fini, @@ -561,5 +680,6 @@ const struct amdgpu_df_funcs df_v3_6_funcs = { .pmc_stop = df_v3_6_pmc_stop, .pmc_get_count = df_v3_6_pmc_get_count, .get_fica = df_v3_6_get_fica, - .set_fica = df_v3_6_set_fica + .set_fica = df_v3_6_set_fica, + .query_ras_poison_mode = df_v3_6_query_ras_poison_mode, }; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h index 76998541bc30..2505c7ef258a 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h @@ -35,15 +35,6 @@ enum DF_V3_6_MGCG { DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15 }; -/* Defined in global_features.h as FTI_PERFMON_VISIBLE */ -#define DF_V3_6_MAX_COUNTERS 4 - -/* get flags from df perfmon config */ -#define DF_V3_6_GET_EVENT(x) (x & 0xFFUL) -#define DF_V3_6_GET_INSTANCE(x) ((x >> 8) & 0xFFUL) -#define DF_V3_6_GET_UNITMASK(x) ((x >> 16) & 0xFFUL) -#define DF_V3_6_PERFMON_OVERFLOW 0xFFFFFFFFFFFFULL - extern const struct attribute_group *df_v3_6_attr_groups[]; extern const struct amdgpu_df_funcs df_v3_6_funcs;
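df_v3_6_query_ras_poison_mode() above treats the four HWAssert mask bits as a consensus vote: all set means DF poison mode is on, all clear means it is off, and any disagreement is logged and conservatively reported as off. A standalone restatement of that decision logic (query_poison_mode and the harness are illustrative names, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* all four mask bits must agree; a mixed reading is reported and treated as off */
static bool query_poison_mode(int v0, int v1, int v28, int v31)
{
	if (v0 && v1 && v28 && v31)
		return true;
	if (!v0 && !v1 && !v28 && !v31)
		return false;
	fprintf(stderr, "poison setting is inconsistent (%d:%d:%d:%d)\n",
		v0, v1, v28, v31);
	return false;
}

int main(void)
{
	printf("all set: %d\n", query_poison_mode(1, 1, 1, 1));	/* 1 */
	printf("all clear: %d\n", query_poison_mode(0, 0, 0, 0));	/* 0 */
	printf("mixed: %d\n", query_poison_mode(1, 0, 1, 1));	/* 0, with a warning */
	return 0;
}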
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c index 88efaecf9f70..e9f177e9e3cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,12 +24,13 @@ #include "nv.h" #include "soc15_common.h" -#include "navi10_ip_offset.h" +#include "soc15_hw_ip.h" +#include "dimgrey_cavefish_ip_offset.h" -int navi10_reg_base_init(struct amdgpu_device *adev) +int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev) { - int i; - + /* HW has more IP blocks, only initialize the blocks needed by the driver */ + uint32_t i; for (i = 0 ; i < MAX_INSTANCE ; ++i) { adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); @@ -38,18 +39,16 @@ int navi10_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i])); adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); } - return 0; } - - diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index ba9e53a1abc3..90a834dc4008 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -29,39 +29,164 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_psp.h" -#include "amdgpu_smu.h" #include "nv.h" #include "nvd.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" +#include "smuio/smuio_11_0_0_sh_mask.h" #include "navi10_enum.h" -#include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "clearstate_gfx10.h" #include "v10_structs.h" #include "gfx_v10_0.h" #include "nbio_v2_3.h" -/** +/* * Navi10 has two graphic rings to share each graphic pipe. * 1. Primary ring * 2. Async ring - * - * In bring-up phase, it just used primary ring so set gfx ring number as 1 at - * first. 
*/ -#define GFX10_NUM_GFX_RINGS 2 +#define GFX10_NUM_GFX_RINGS_NV1X 1 +#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1 #define GFX10_MEC_HPD_SIZE 2048 +#define RLCG_VFGATE_DISABLED 0x4000000 +#define RLCG_WRONG_OPERATION_TYPE 0x2000000 +#define RLCG_NOT_IN_RANGE 0x1000000 + #define F32_CE_PROGRAM_RAM_SIZE 65536 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define mmCGTT_GS_NGG_CLK_CTRL 0x5087 #define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1 +#define mmCGTT_SPI_RA0_CLK_CTRL 0x507a +#define mmCGTT_SPI_RA0_CLK_CTRL_BASE_IDX 1 +#define mmCGTT_SPI_RA1_CLK_CTRL 0x507b +#define mmCGTT_SPI_RA1_CLK_CTRL_BASE_IDX 1 + +#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8 +#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L + +#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006 +#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1 + +#define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55 +#define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0 +#define mmRLC_SAFE_MODE_Sienna_Cichlid_BASE_IDX 1 +#define mmRLC_CP_SCHEDULERS_Sienna_Cichlid 0x4ca1 +#define mmRLC_CP_SCHEDULERS_Sienna_Cichlid_BASE_IDX 1 +#define mmSPI_CONFIG_CNTL_Sienna_Cichlid 0x11ec +#define mmSPI_CONFIG_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_ESGS_RING_SIZE_Sienna_Cichlid 0x0fc1 +#define mmVGT_ESGS_RING_SIZE_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_GSVS_RING_SIZE_Sienna_Cichlid 0x0fc2 +#define mmVGT_GSVS_RING_SIZE_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_TF_RING_SIZE_Sienna_Cichlid 0x0fc3 +#define mmVGT_TF_RING_SIZE_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid 0x0fc4 +#define mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_TF_MEMORY_BASE_Sienna_Cichlid 0x0fc5 +#define mmVGT_TF_MEMORY_BASE_Sienna_Cichlid_BASE_IDX 0 +#define mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid 0x0fc6 +#define mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid_BASE_IDX 0 +#define GRBM_STATUS2__RLC_BUSY_Sienna_Cichlid__SHIFT 0x1a +#define GRBM_STATUS2__RLC_BUSY_Sienna_Cichlid_MASK 0x04000000L +#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_Sienna_Cichlid_MASK 0x00000FFCL +#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_Sienna_Cichlid__SHIFT 0x2 +#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_Sienna_Cichlid_MASK 0x00000FFCL +#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 +#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 + +#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1 +#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441 +#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1 +#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261 +#define mmVGT_TF_MEMORY_BASE_HI_Vangogh_BASE_IDX 1 +#define mmVGT_HS_OFFCHIP_PARAM_Vangogh 0x224f +#define mmVGT_HS_OFFCHIP_PARAM_Vangogh_BASE_IDX 1 +#define mmVGT_TF_RING_SIZE_Vangogh 0x224e +#define mmVGT_TF_RING_SIZE_Vangogh_BASE_IDX 1 +#define mmVGT_GSVS_RING_SIZE_Vangogh 0x2241 +#define mmVGT_GSVS_RING_SIZE_Vangogh_BASE_IDX 1 +#define mmVGT_TF_MEMORY_BASE_Vangogh 0x2250 +#define mmVGT_TF_MEMORY_BASE_Vangogh_BASE_IDX 1 +#define mmVGT_ESGS_RING_SIZE_Vangogh 0x2240 +#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1 +#define mmSPI_CONFIG_CNTL_Vangogh 0x2440 +#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1 +#define mmGCR_GENERAL_CNTL_Vangogh 0x1580 +#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0 +#define 
RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh 0x0000FFFFL + +#define mmCP_HYP_PFP_UCODE_ADDR 0x5814 +#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1 +#define mmCP_HYP_PFP_UCODE_DATA 0x5815 +#define mmCP_HYP_PFP_UCODE_DATA_BASE_IDX 1 +#define mmCP_HYP_CE_UCODE_ADDR 0x5818 +#define mmCP_HYP_CE_UCODE_ADDR_BASE_IDX 1 +#define mmCP_HYP_CE_UCODE_DATA 0x5819 +#define mmCP_HYP_CE_UCODE_DATA_BASE_IDX 1 +#define mmCP_HYP_ME_UCODE_ADDR 0x5816 +#define mmCP_HYP_ME_UCODE_ADDR_BASE_IDX 1 +#define mmCP_HYP_ME_UCODE_DATA 0x5817 +#define mmCP_HYP_ME_UCODE_DATA_BASE_IDX 1 + +#define mmCPG_PSP_DEBUG 0x5c10 +#define mmCPG_PSP_DEBUG_BASE_IDX 1 +#define mmCPC_PSP_DEBUG 0x5c11 +#define mmCPC_PSP_DEBUG_BASE_IDX 1 +#define CPC_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L +#define CPG_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L + +//CC_GC_SA_UNIT_DISABLE +#define mmCC_GC_SA_UNIT_DISABLE 0x0fe9 +#define mmCC_GC_SA_UNIT_DISABLE_BASE_IDX 0 +#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8 +#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L +//GC_USER_SA_UNIT_DISABLE +#define mmGC_USER_SA_UNIT_DISABLE 0x0fea +#define mmGC_USER_SA_UNIT_DISABLE_BASE_IDX 0 +#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8 +#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L +//PA_SC_ENHANCE_3 +#define mmPA_SC_ENHANCE_3 0x1085 +#define mmPA_SC_ENHANCE_3_BASE_IDX 0 +#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3 +#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L + +#define mmCGTT_SPI_CS_CLK_CTRL 0x507c +#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX 1 + +#define mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid 0x16f3 +#define mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0 +#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db +#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0 + +#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030 +#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0 + +#define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5 +#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1 + +#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28) +#define GFX_RLCG_GC_WRITE (0x0 << 28) +#define GFX_RLCG_GC_READ (0x1 << 28) +#define GFX_RLCG_MMHUB_WRITE (0x2 << 28) + +#define RLCG_ERROR_REPORT_ENABLED(adev) \ + (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) MODULE_FIRMWARE("amdgpu/navi10_ce.bin"); MODULE_FIRMWARE("amdgpu/navi10_pfp.bin"); @@ -89,6 +214,62 @@ MODULE_FIRMWARE("amdgpu/navi12_mec.bin"); MODULE_FIRMWARE("amdgpu/navi12_mec2.bin"); MODULE_FIRMWARE("amdgpu/navi12_rlc.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_ce.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_pfp.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_me.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_mec.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_mec2.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/navy_flounder_ce.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_pfp.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_me.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_mec.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_mec2.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/vangogh_ce.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_pfp.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_me.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_mec.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_mec2.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_ce.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_pfp.bin"); 
+MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_me.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec2.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/beige_goby_ce.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_pfp.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_me.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_mec.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_mec2.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/yellow_carp_ce.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_pfp.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_me.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/cyan_skillfish_ce.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_pfp.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_me.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec2.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_ce.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_pfp.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_me.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), @@ -123,7 +304,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), @@ -138,6 +319,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 
0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 
0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 
0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),

	[several hundred repeated "+" lines elided: the hunk continues this same
	pattern for every RLC SPM sample-delay slot, alternating a
	SOC15_REG_GOLDEN_VALUE() write of mmGRBM_GFX_INDEX (0x0 or 0x10000 to
	select a shader engine) with an mmRLC_SPM_GLB/SE_SAMPLEDELAY_IND_ADDR
	offset and its matching mmRLC_SPM_GLB/SE_SAMPLEDELAY_IND_DATA delay
	value, before finishing with the skew settings below]

+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -171,7 +1408,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070105),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -196,9 +1433,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -216,37 +1454,2124 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };

+static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+				  int write, u32 *rlcg_flag)
+{
+	switch (hwip) {
+	case GC_HWIP:
+		if (amdgpu_sriov_reg_indirect_gc(adev)) {
+			*rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+			return true;
+		/* only in the new interface are AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+		} else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
+			*rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
+
+			return true;
+		}
+
+		break;
+	case MMHUB_HWIP:
+		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+		    (acc_flags & AMDGPU_REGS_RLC) && write) {
+			*rlcg_flag = GFX_RLCG_MMHUB_WRITE;
+			return true;
+		}
+
+		break;
+	default:
+		DRM_DEBUG("Register is not programmed via RLCG\n");
+	}
+
+	return false;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
+{
+	static void *scratch_reg0;
+	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
+	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
+	uint32_t i = 0;
+	uint32_t retries = 50000;
+	u32 ret = 0;
+	u32 tmp;
+
+	scratch_reg0 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+	scratch_reg1 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+	scratch_reg2 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+	scratch_reg3 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+
+	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
+		spare_int = adev->rmmio +
+			    (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX] +
+			     mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+	} else {
+		spare_int = adev->rmmio +
+			    (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+	}
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (offset == grbm_cntl || offset == grbm_idx) {
+		if (offset == grbm_cntl)
+			writel(v, scratch_reg2);
+		else if (offset == grbm_idx)
+			writel(v, scratch_reg3);
+
+		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+	} else {
+		writel(v, scratch_reg0);
+		writel(offset | flag, scratch_reg1);
+		writel(1, spare_int);
+
+		for (i = 0; i < retries; i++) {
+			tmp = readl(scratch_reg1);
+			if (!(tmp & flag))
+				break;
+
+			udelay(10);
+		}
+
+		if (i >= retries) {
+			if (RLCG_ERROR_REPORT_ENABLED(adev)) {
+				if (tmp & RLCG_VFGATE_DISABLED)
+					pr_err("The vfgate is disabled, programming reg:0x%05x failed!\n", offset);
+				else if (tmp & RLCG_WRONG_OPERATION_TYPE)
+					pr_err("Wrong operation type, programming reg:0x%05x failed!\n", offset);
+				else if (tmp & RLCG_NOT_IN_RANGE)
+					pr_err("The register is out of range, programming reg:0x%05x failed!\n", offset);
+				else
+					pr_err("Unknown error type, programming reg:0x%05x failed!\n", offset);
+			} else
+				pr_err("timeout: rlcg programming reg:0x%05x failed!\n", offset);
+		}
+	}
+
+	ret = readl(scratch_reg0);
+
+	return ret;
+}
+
+static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
+{
+	u32 rlcg_flag;
+
+	if (!amdgpu_sriov_runtime(adev) &&
+	    gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
+		gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+		return;
+	}
+
+	if (acc_flags & AMDGPU_REGS_NO_KIQ)
+		WREG32_NO_KIQ(offset, value);
+	else
+		WREG32(offset, value);
+}
+
+static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
+{
+	u32 rlcg_flag;
+
+	if (!amdgpu_sriov_runtime(adev) &&
+	    gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
+		return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+	if (acc_flags & AMDGPU_REGS_NO_KIQ)
+		return RREG32_NO_KIQ(offset);
+	else
+		return RREG32(offset);
+}
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
 {
 	/* Pending on emulation bring up */
 };

+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), +
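The helpers above form the SR-IOV RLCG access path for GC 10.x: gfx_v10_sriov_wreg()/gfx_v10_sriov_rreg() first ask gfx_v10_get_rlcg_flag() whether the access must be proxied through the RLC firmware, and otherwise fall back to WREG32/RREG32 (or their _NO_KIQ variants). gfx_v10_rlcg_rw() is the handshake itself: the payload goes into SCRATCH_REG0, the register offset OR'ed with the operation flag into SCRATCH_REG1, a write to the RLC spare-interrupt register rings the doorbell, and the driver polls SCRATCH_REG1 (up to 50000 tries, 10 us apart) until the firmware clears the flag; a read result then comes back in SCRATCH_REG0, while GRBM_GFX_CNTL/GRBM_GFX_INDEX writes are mirrored into SCRATCH_REG2/3 and also written directly. Below is a minimal user-space sketch of that polling handshake; mock_readl()/mock_writel(), fake_rlc_service() and RLCG_OP_FLAG are hypothetical stand-ins for the kernel's readl()/writel(), the RLC firmware and the real flag encodings, not the driver code. (The golden_settings_gc_rlc_spm_10_1_nv14 table resumes after the sketch.)

/* Hedged sketch of the RLCG scratch-register handshake; all names here
 * are stand-ins so the example is self-contained and compilable. */
#include <stdint.h>
#include <stdio.h>

#define RLCG_OP_FLAG 0x80000000u	/* hypothetical "request pending" bit */

static uint32_t regs[2];		/* fake SCRATCH_REG0/SCRATCH_REG1 */

static uint32_t mock_readl(int idx) { return regs[idx]; }
static void mock_writel(uint32_t v, int idx) { regs[idx] = v; }

/* On real hardware the RLC firmware services the request; fake it here. */
static void fake_rlc_service(void)
{
	regs[1] &= ~RLCG_OP_FLAG;	/* ack: request consumed */
	regs[0] = 0xdeadbeef;		/* read result lands in SCRATCH_REG0 */
}

static int rlcg_rw_sketch(uint32_t offset, uint32_t value, uint32_t *out)
{
	uint32_t i;

	mock_writel(value, 0);			/* payload -> SCRATCH_REG0 */
	mock_writel(offset | RLCG_OP_FLAG, 1);	/* offset+op -> SCRATCH_REG1 */
	fake_rlc_service();			/* spare-int doorbell goes here */

	for (i = 0; i < 50000; i++) {		/* same bounded poll as the driver */
		if (!(mock_readl(1) & RLCG_OP_FLAG))
			break;
	}
	if (i >= 50000)
		return -1;			/* timeout, as in the pr_err path */

	*out = mock_readl(0);			/* result returns in SCRATCH_REG0 */
	return 0;
}

int main(void)
{
	uint32_t v;

	if (rlcg_rw_sketch(0x1234, 0, &v) == 0)
		printf("read back 0x%x\n", v);
	return 0;
}

Faking the firmware acknowledgement keeps the sketch self-contained; in the driver the poll can genuinely time out, which is exactly what the pr_err() branches above report.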
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 
0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 
0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 
0x144), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) +}; + static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = { /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8), 
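Throughout these tables, each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry names the bits an entry may touch (the AND mask) and the value to fold into them (the OR mask), and the sequence is applied in order at hardware init; that is why the SPM tables interleave GRBM_GFX_INDEX writes (selecting a shader engine or broadcast) with RLC_SPM_*_SAMPLEDELAY_IND_ADDR/IND_DATA pairs, an indirect address/data protocol. A sketch of the masked read-modify-write apply loop follows; struct golden_reg, reg_read()/reg_write() and the demo register offsets are hypothetical stand-ins, not the driver's soc15 helper. (The golden_settings_gc_rlc_spm_10_1_2_nv12 table continues below.)

/* Hedged sketch of applying a golden-settings table, under the stated
 * assumptions; field and helper names are illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct golden_reg {
	uint32_t reg;		/* register offset */
	uint32_t and_mask;	/* bits this entry is allowed to touch */
	uint32_t or_mask;	/* new value for those bits */
};

static uint32_t fake_mmio[0x6000];	/* stand-in register file */

static uint32_t reg_read(uint32_t reg) { return fake_mmio[reg]; }
static void reg_write(uint32_t reg, uint32_t v) { fake_mmio[reg] = v; }

static void program_golden_regs(const struct golden_reg *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		uint32_t tmp;

		if (tbl[i].and_mask == 0xffffffffu) {
			/* full-word entry: plain overwrite, no read needed */
			tmp = tbl[i].or_mask;
		} else {
			/* partial entry: read-modify-write under the mask */
			tmp = reg_read(tbl[i].reg);
			tmp &= ~tbl[i].and_mask;
			tmp |= tbl[i].or_mask & tbl[i].and_mask;
		}
		reg_write(tbl[i].reg, tmp);
	}
}

int main(void)
{
	/* mirrors the SPM pattern above: select a shader engine via an
	 * index register, then write an indirect ADDR/DATA pair
	 * (offsets are made up for the demo) */
	const struct golden_reg demo[] = {
		{ 0x2200, 0x00ffffff, 0x00000000 },	/* index: SE0 */
		{ 0x4de0, 0xffffffff, 0x00000028 },	/* ..._IND_ADDR */
		{ 0x4de4, 0xffffffff, 0x0000000a },	/* ..._IND_DATA */
	};

	program_golden_regs(demo, 3);
	printf("index mirror = 0x%x\n", reg_read(0x2200));
	return 0;
}

The 0xffffffff shortcut matters in practice: most entries in these SPM tables use a full mask, so they can be programmed with a single write instead of a read-modify-write.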
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), +
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d), + 
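The long sample-delay runs above and below all follow one rhythm: a write to mmGRBM_GFX_INDEX banks the next accesses onto one shader engine (0x0 selects SE0; 0x10000 sets the SE_INDEX field for SE1), then mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR picks a delay slot and mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA programs its value, with the ADDR/DATA pair repeated once per engine. A minimal standalone sketch of that indexed-write pattern, with placeholder offsets and a wreg() stub standing in for the driver's real WREG32 plumbing:

#include <stdio.h>

/* Placeholder offsets; the real ones come from the asic_reg headers. */
#define mmGRBM_GFX_INDEX                  0x2200
#define mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR 0x4cde
#define mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA 0x4cdf

static void wreg(unsigned int reg, unsigned int val)
{
	printf("WREG32(0x%04x, 0x%08x)\n", reg, val);	/* stands in for MMIO */
}

/* One table run per shader engine: bank, select the slot, program it. */
static void spm_set_delay(unsigned int se_select, unsigned int slot,
			  unsigned int delay)
{
	wreg(mmGRBM_GFX_INDEX, se_select);		/* 0x0 = SE0, 0x10000 = SE1 */
	wreg(mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, slot);
	wreg(mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, delay);
}

int main(void)
{
	spm_set_delay(0x0, 0x150, 0xe);		/* mirrors one run of the table */
	spm_set_delay(0x10000, 0x150, 0xe);
	return 0;
}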
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000) +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), + SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988), + 
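Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) row names the bits the table owns (and_mask) and the value to put in them (or_mask); soc15_program_register_sequence() then applies the array one read-modify-write at a time. A standalone sketch of that convention, assuming the usual amdgpu golden-register semantics, with a fake register file in place of real MMIO (the 0x2a4b offset is a stand-in):

#include <stdio.h>

struct reg_golden { unsigned int reg, and_mask, or_mask; };

static unsigned int mmio[0x10000];	/* fake register file for the sketch */

/* Clear the owned bits, then OR in the new value; an and_mask of
 * 0xffffffff degenerates to a plain overwrite of the register. */
static void program_register_sequence(const struct reg_golden *regs,
				      unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		unsigned int tmp = mmio[regs[i].reg];

		tmp &= ~regs[i].and_mask;
		tmp |= regs[i].or_mask & regs[i].and_mask;
		mmio[regs[i].reg] = tmp;
	}
}

int main(void)
{
	/* e.g. the GL2C_CTRL3 row above: own 0xf7ffffff, write 0x10f80988 */
	const struct reg_golden row = { 0x2a4b, 0xf7ffffff, 0x10f80988 };

	mmio[row.reg] = 0x08000000;	/* pretend bit 27 was already set */
	program_register_sequence(&row, 1);
	printf("0x%08x\n", mmio[row.reg]);	/* 0x18f80988: bit 27 survives */
	return 0;
}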
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffffffff, 0x00070104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = +{ + /* Pending on emulation bring up */ +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xffffffff, 0xff008080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xffff8fff, 0xff008080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 
0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffffffff, 0x00070104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_START_PHASE, 0x000000ff, 0x00000004), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), + + /* This is not in GDB yet. Don't remove it. It fixes a GPU hang on Navy Flounder. 
*/ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), + + /* This is not in GDB yet. Don't remove it. It fixes a GPU hang on VanGogh. 
*/ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000) +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0x30000000, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0x7e000000, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0x40000000, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0x00040000, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0x01000000, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0x0000001f, 0x00180070), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 
0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020) +}; + +static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xb0000ff0, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff000000, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + 
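These per-chip tables are selected by the discovered GC IP version rather than by a chip enum, matched with IP_VERSION(major, minor, revision) in the switches further down. A hedged sketch of that dispatch, assuming the common packing of the three fields into one comparable integer (the real macro lives in the amdgpu headers):

#include <stdio.h>

/* Assumed packing: major in bits 16+, minor in 8..15, revision in 0..7. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const char *gc_golden_table(unsigned int gc_ver)
{
	switch (gc_ver) {
	case IP_VERSION(10, 3, 0): return "golden_settings_gc_10_3 + sienna_cichlid";
	case IP_VERSION(10, 3, 2): return "golden_settings_gc_10_3_2";
	case IP_VERSION(10, 3, 1): return "golden_settings_gc_10_3_vangogh";
	case IP_VERSION(10, 3, 5): return "golden_settings_gc_10_3_5";
	default:                   return "no per-version table";
	}
}

int main(void)
{
	/* Packing also makes range checks cheap, e.g. the ">= 10.3.0" tests below. */
	unsigned int ver = IP_VERSION(10, 3, 2);

	printf("%s\n", gc_golden_table(ver));
	printf("gfx10.3+: %d\n", ver >= IP_VERSION(10, 3, 0));
	return 0;
}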
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) +}; + +static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_FAST_CLKS, 0x3fffffff, 0x0000493e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x3c000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0xa0000000, 0xa0000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x00008000, 0x003c8014), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_DRAM_BURST_CTRL, 0x00000010, 0x00000017), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xd8d8d8d8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000003), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00009d00, 0x00008500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0xffffffff, 0x000fffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_DRAM_BURST_CTRL, 0x00000010, 0x00000017), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xfcfcfcfc, 0xd8d8d8d8), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77707770, 0x21302130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77707770, 0x21302130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xfc02002f, 0x9402002f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0x00002188, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x08000009, 0x08000009), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xcc3fcc03, 0x842a4c02), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000000f, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffff3109, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x00030008, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) +/* TODO: pending on golden setting value of gb address config */ +#define CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN 0x00100044 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v10_0_get_cu_info(struct 
amdgpu_device *adev,
-	struct amdgpu_cu_info *cu_info);
+	struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
	u32 sh_num, u32 instance);
@@ -258,7 +3583,10 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);

static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -348,15 +3676,29 @@ static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

+static void gfx10_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+				uint16_t pasid, uint32_t flush_type,
+				bool all_hub)
+{
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+	amdgpu_ring_write(kiq_ring,
+			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
+	.kiq_invalidate_tlbs = gfx10_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
+	.invalidate_tlbs_size = 2,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
@@ -364,10 +3706,33 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(10, 1, 10):
+		soc15_program_register_sequence(adev,
+			golden_settings_gc_rlc_spm_10_0_nv10,
+			(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+		break;
+	case IP_VERSION(10, 1, 1):
+		soc15_program_register_sequence(adev,
+			golden_settings_gc_rlc_spm_10_1_nv14,
+			(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+		break;
+	case IP_VERSION(10, 1, 2):
+		soc15_program_register_sequence(adev,
+			golden_settings_gc_rlc_spm_10_1_2_nv12,
+			(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+		break;
+	default:
+		break;
+	}
+}
+
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
-	switch (adev->asic_type) {
-	case CHIP_NAVI10:
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(10, 1, 10):
		soc15_program_register_sequence(adev,
			golden_settings_gc_10_1,
			(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
@@ -375,7 +3740,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
			golden_settings_gc_10_0_nv10,
			(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
-	case CHIP_NAVI14:
+	case IP_VERSION(10, 1, 1):
		soc15_program_register_sequence(adev,
golden_settings_gc_10_1_1, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_1)); @@ -383,7 +3748,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): soc15_program_register_sequence(adev, golden_settings_gc_10_1_2, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2)); @@ -391,9 +3756,48 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_1_2_nv12, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12)); break; + case IP_VERSION(10, 3, 0): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3)); + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_sienna_cichlid, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_sienna_cichlid)); + break; + case IP_VERSION(10, 3, 2): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_2, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_2)); + break; + case IP_VERSION(10, 3, 1): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_vangogh, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh)); + break; + case IP_VERSION(10, 3, 3): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_3, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_3)); + break; + case IP_VERSION(10, 3, 4): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_4, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); + break; + case IP_VERSION(10, 3, 5): + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_5, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_5)); + break; + case IP_VERSION(10, 1, 3): + soc15_program_register_sequence(adev, + golden_settings_gc_10_0_cyan_skillfish, + (const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish)); + break; default: break; } + gfx_v10_0_init_spm_golden_registers(adev); } static void gfx_v10_0_scratch_init(struct amdgpu_device *adev) @@ -474,18 +3878,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) else udelay(1); } - if (i < adev->usec_timeout) { - if (amdgpu_emu_mode == 1) - DRM_INFO("ring test on %d succeeded in %d msecs\n", - ring->idx, i); - else - DRM_INFO("ring test on %d succeeded in %d usecs\n", - ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ring->idx, scratch, tmp); - r = -EINVAL; - } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + amdgpu_gfx_scratch_free(adev, scratch); return r; @@ -496,29 +3892,29 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; - uint32_t tmp = 0; + unsigned index; + uint64_t gpu_addr; + uint32_t tmp; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } - - WREG32(scratch, 0xCAFEDEAD); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) goto err1; - } - ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); - ib.ptr[2] = 0xDEADBEEF; - 
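The reworked IB test in the hunk that follows drops the scratch-register round trip: a five-dword PACKET3_WRITE_DATA command makes the CP write 0xDEADBEEF straight into a GPU-visible writeback slot, and success is then read back from adev->wb.wb[index] on the CPU side. A standalone sketch of that packet layout (the PACKET3 field encodings here are assumptions for illustration, not the driver's authoritative definitions):

#include <stdint.h>
#include <stdio.h>

/* Assumed encodings: type-3 header with opcode in bits 8..15 and a count
 * field holding (dwords after header - 1); dst_sel 5 selects memory. */
#define PACKET3(op, n)	((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))
#define PACKET3_WRITE_DATA	0x37
#define WRITE_DATA_DST_SEL(x)	((x) << 8)
#define WR_CONFIRM		(1u << 30)

int main(void)
{
	uint64_t gpu_addr = 0x100045000ULL;	/* stand-in for wb.gpu_addr + index*4 */
	uint32_t ib[5];

	ib[0] = PACKET3(PACKET3_WRITE_DATA, 3);	/* count 3: four dwords follow */
	ib[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;	/* to memory, confirmed */
	ib[2] = (uint32_t)gpu_addr;		/* lower_32_bits(gpu_addr) */
	ib[3] = (uint32_t)(gpu_addr >> 32);	/* upper_32_bits(gpu_addr) */
	ib[4] = 0xDEADBEEF;			/* value later polled from wb.wb[] */

	for (int i = 0; i < 5; i++)
		printf("ib[%d] = 0x%08x\n", i, ib[i]);
	return 0;
}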
ib.length_dw = 3; + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) @@ -526,29 +3922,22 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + tmp = adev->wb.wb[index]; + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); + else r = -EINVAL; - } err2: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err1: - amdgpu_gfx_scratch_free(adev, scratch); - + amdgpu_device_wb_free(adev, index); return r; } @@ -574,10 +3963,11 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) { adev->gfx.cp_fw_write_wait = false; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): if ((adev->gfx.me_fw_version >= 0x00000046) && (adev->gfx.me_feature_version >= 27) && (adev->gfx.pfp_fw_version >= 0x00000068) && @@ -586,13 +3976,20 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 27)) adev->gfx.cp_fw_write_wait = true; break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + adev->gfx.cp_fw_write_wait = true; + break; default: break; } - if (adev->gfx.cp_fw_write_wait == false) - DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ - GRBM requires 1-cycle delay in cp firmware\n"); + if (!adev->gfx.cp_fw_write_wait) + DRM_WARN_ONCE("CP firmware version too old, please update!"); } @@ -617,11 +4014,40 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); } +static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_2 *rlc_hdr; + + rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes); + adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes); + adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes); + adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes); +} + +static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) +{ + bool ret = false; + + switch (adev->pdev->revision) { + case 0xc2: + case 0xc3: + ret = true; + break; + default: + ret = false; + break; + } + + return ret ; +} + static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) + 
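On Navi10, GFXOFF now stays enabled only for PCI revisions known to handle it (0xc2 and 0xc3); every other revision takes the PP_GFXOFF_MASK clear on the next line. A tiny standalone sketch of that gating (the single-bit PP_GFXOFF_MASK value is assumed here; the real one lives in the power-management headers):

#include <stdio.h>

#define PP_GFXOFF_MASK (1u << 0)	/* assumed bit, for illustration */

static int navi10_gfxoff_ok(unsigned char pci_rev)
{
	switch (pci_rev) {
	case 0xc2:
	case 0xc3:
		return 1;	/* revisions validated for GFXOFF */
	default:
		return 0;
	}
}

int main(void)
{
	unsigned int pp_feature = ~0u;	/* all PM features requested */

	if (!navi10_gfxoff_ok(0xc1))
		pp_feature &= ~PP_GFXOFF_MASK;	/* same clear as the hunk here */
	printf("gfxoff %s\n", (pp_feature & PP_GFXOFF_MASK) ? "on" : "off");
	return 0;
}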
adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; default: break; @@ -632,7 +4058,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; char fw_name[40]; - char wks[10]; + char *wks = ""; int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; @@ -645,20 +4071,43 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - memset(wks, 0, sizeof(wks)); - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(10, 1, 1): chip_name = "navi14"; if (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)) - snprintf(wks, sizeof(wks), "_wks"); + wks = "_wks"; break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): chip_name = "navi12"; break; + case IP_VERSION(10, 3, 0): + chip_name = "sienna_cichlid"; + break; + case IP_VERSION(10, 3, 2): + chip_name = "navy_flounder"; + break; + case IP_VERSION(10, 3, 1): + chip_name = "vangogh"; + break; + case IP_VERSION(10, 3, 4): + chip_name = "dimgrey_cavefish"; + break; + case IP_VERSION(10, 3, 5): + chip_name = "beige_goby"; + break; + case IP_VERSION(10, 3, 3): + chip_name = "yellow_carp"; + break; + case IP_VERSION(10, 1, 3): + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) + chip_name = "cyan_skillfish2"; + else + chip_name = "cyan_skillfish"; + break; default: BUG(); } @@ -705,8 +4154,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; version_major = le16_to_cpu(rlc_hdr->header.header_version_major); version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); - if (version_major == 2 && version_minor == 1) - adev->gfx.rlc.is_rlc_v2_1 = true; adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); @@ -748,8 +4195,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); - if (adev->gfx.rlc.is_rlc_v2_1) - gfx_v10_0_init_rlc_ext_microcode(adev); + if (version_major == 2) { + if (version_minor >= 1) + gfx_v10_0_init_rlc_ext_microcode(adev); + if (version_minor == 2) + gfx_v10_0_init_rlc_iram_dram_microcode(adev); + } } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks); @@ -805,12 +4256,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; info->ucode_id = AMDGPU_UCODE_ID_RLC_G; info->fw = adev->gfx.rlc_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - if (adev->gfx.rlc.is_rlc_v2_1 && - adev->gfx.rlc.save_restore_list_cntl_size_bytes && + if (info->fw) { + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + if (adev->gfx.rlc.save_restore_list_cntl_size_bytes && adev->gfx.rlc.save_restore_list_gpm_size_bytes && adev->gfx.rlc.save_restore_list_srm_size_bytes) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL]; @@ -830,6 +4281,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) info->fw = adev->gfx.rlc_fw; adev->firmware.fw_size += 
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); + + if (adev->gfx.rlc.rlc_iram_ucode_size_bytes && + adev->gfx.rlc.rlc_dram_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE); + } } info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; @@ -998,6 +4464,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1039,21 +4509,23 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE; - r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_obj, - &adev->gfx.mec.hpd_eop_gpu_addr, - (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - gfx_v10_0_mec_fini(adev); - return r; - } + if (mec_hpd_size) { + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + gfx_v10_0_mec_fini(adev); + return r; + } - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); - amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + } if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; @@ -1127,6 +4599,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -1151,11 +4624,26 @@ static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) - { - nv_grbm_select(adev, me, pipe, q, vm); - } + u32 me, u32 pipe, u32 q, u32 vm) +{ + nv_grbm_select(adev, me, pipe, q, vm); +} + +static void gfx_v10_0_update_perfmon_mgcg(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def; + data = def = RREG32_SOC15(GC, 0, mmRLC_PERFMON_CLK_CNTL); + + if (enable) + data |= RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK; + else + data &= ~RLC_PERFMON_CLK_CNTL__PERFMON_CLOCK_STATE_MASK; + + if (data != def) + WREG32_SOC15(GC, 0, mmRLC_PERFMON_CLK_CNTL, data); +} static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = { .get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter, @@ -1164,6 +4652,8 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = { .read_wave_sgprs = &gfx_v10_0_read_wave_sgprs, .read_wave_vgprs 
= &gfx_v10_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v10_0_select_me_pipe_q, + .init_spm_golden = &gfx_v10_0_init_spm_golden_registers, + .update_perfmon_mgcg = &gfx_v10_0_update_perfmon_mgcg, }; static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) @@ -1172,16 +4662,39 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.funcs = &gfx_v10_0_gfx_funcs; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; adev->gfx.config.sc_hiz_tile_fifo_size = 0; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + adev->gfx.config.gb_addr_config_fields.num_pkrs = + 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); + break; + case IP_VERSION(10, 1, 3): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN; break; default: BUG(); @@ -1234,8 +4747,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; - r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; return 0; @@ -1246,7 +4759,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, { int r; unsigned irq_type; - struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + struct amdgpu_ring *ring; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -1265,10 +4779,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ - r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + hw_prio, NULL); if (r) return r; @@ -1281,17 +4796,31 @@ static int gfx_v10_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 2; + adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + adev->gfx.me.num_me = 1; + adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_pipe_per_mec = 4; + adev->gfx.mec.num_queue_per_pipe = 4; + break; default: adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; @@ -1461,12 +4990,6 @@ static int gfx_v10_0_sw_fini(void *handle) return 0; } - -static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev) -{ - /* TODO */ -} - static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) { @@ -1515,12 +5038,18 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) int i, j; u32 data; u32 active_rbs = 0; + u32 bitmap; u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + bitmap = i * adev->gfx.config.max_sh_per_se + j; + if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) && + ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) + continue; gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v10_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * @@ -1543,6 +5072,11 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade uint32_t num_packer_per_sc; uint32_t pa_sc_tile_steering_override; + /* for ASICs that integrates GFX v10.3 + * pa_sc_tile_steering_override should be set to 0 */ + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + return 0; + /* init num_sc */ num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se * adev->gfx.config.num_sc_per_sh; @@ -1568,8 +5102,6 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade } #define DEFAULT_SH_MEM_BASES (0x6000) -#define FIRST_COMPUTE_VMID (8) -#define LAST_COMPUTE_VMID (16) static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) { @@ -1585,7 +5117,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); mutex_lock(&adev->srbm_mutex); - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 
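The loop that follows now hands every VMID from adev->vm_manager.first_kfd_vmid (typically 8) up to AMDGPU_NUM_VMID (16) to compute, giving each the same SH_MEM_BASES value: DEFAULT_SH_MEM_BASES packed into both halves of the register, private-aperture base in the low half and shared-aperture base in the high half. A worked example of that packing (the field layout is the one these GFX generations are understood to use):

#include <stdio.h>

#define DEFAULT_SH_MEM_BASES 0x6000

int main(void)
{
	unsigned int sh_mem_bases =
		DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	printf("SH_MEM_BASES = 0x%08x\n", sh_mem_bases);	/* 0x60006000 */

	/* One write per KFD-owned VMID, 8..15 in the typical configuration. */
	for (int vmid = 8; vmid < 16; vmid++)
		printf("vmid %2d -> SH_MEM_BASES 0x%08x\n", vmid, sh_mem_bases);
	return 0;
}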
nv_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); @@ -1596,7 +5128,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) /* Initialize all compute VMIDs to have no GDS, GWS, or OA acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); @@ -1614,7 +5146,7 @@ static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev) * the driver can enable them for graphics. VMID0 should maintain * access so that HWS firmware can save/restore entries. */ - for (vmid = 1; vmid < 16; vmid++) { + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0); @@ -1650,54 +5182,58 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 4 + /* RMI */ 1); /* SQG */ - if (adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14 || - adev->asic_type == CHIP_NAVI12) { - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); - wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev); - /* - * Set corresponding TCP bits for the inactive WGPs in - * GCRD_SA_TARGETS_DISABLE - */ - gcrd_targets_disable_tcp = 0; - /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */ - utcl_invreq_disable = 0; - - for (k = 0; k < max_wgp_per_sh; k++) { - if (!(wgp_active_bitmap & (1 << k))) { - gcrd_targets_disable_tcp |= 3 << (2 * k); - utcl_invreq_disable |= (3 << (2 * k)) | - (3 << (2 * (max_wgp_per_sh + k))); - } + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev); + /* + * Set corresponding TCP bits for the inactive WGPs in + * GCRD_SA_TARGETS_DISABLE + */ + gcrd_targets_disable_tcp = 0; + /* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */ + utcl_invreq_disable = 0; + + for (k = 0; k < max_wgp_per_sh; k++) { + if (!(wgp_active_bitmap & (1 << k))) { + gcrd_targets_disable_tcp |= 3 << (2 * k); + gcrd_targets_disable_tcp |= 1 << (k + (max_wgp_per_sh * 2)); + utcl_invreq_disable |= (3 << (2 * k)) | + (3 << (2 * (max_wgp_per_sh + k))); } - - tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE); - /* only override TCP & SQC bits */ - tmp &= 0xffffffff << (4 * max_wgp_per_sh); - tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask); - WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp); - - tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE); - /* only override TCP bits */ - tmp &= 0xffffffff << (2 * max_wgp_per_sh); - tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask); - WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp); } - } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); + tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE); + /* only override TCP & SQC bits */ + tmp &= (0xffffffffU << (4 * max_wgp_per_sh)); + tmp |= 
(utcl_invreq_disable & utcl_invreq_disable_mask); + WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp); + + tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE); + /* only override TCP & SQC bits */ + tmp &= (0xffffffffU << (3 * max_wgp_per_sh)); + tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask); + WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp); + } } + + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); } static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) { /* TCCs are global (not instanced). */ - uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + uint32_t tcc_disable; + + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); + } else { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + } adev->gfx.config.tcc_disabled_mask = REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | @@ -1711,8 +5247,6 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); - gfx_v10_0_tiling_mode_table_init(adev); - gfx_v10_0_setup_rb(adev); gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info); gfx_v10_0_get_tcc_info(adev); @@ -1746,7 +5280,12 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, bool enable) { - u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); + u32 tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 
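To make the tcp_harvest mask math above concrete: each WGP owns two TCP targets (a 2-bit field at position 2*k), and the new line adds one SQC bit per WGP above the TCP range, which is why the GCRD write-mask widens from 2 * max_wgp_per_sh to 3 * max_wgp_per_sh bits. A standalone sketch of the same arithmetic (the helper name is hypothetical):

    static u32 build_gcrd_disable_mask(u32 wgp_active_bitmap, int max_wgp_per_sh)
    {
        u32 mask = 0;
        int k;

        for (k = 0; k < max_wgp_per_sh; k++) {
            if (!(wgp_active_bitmap & (1 << k))) {
                mask |= 3 << (2 * k);                  /* both TCPs of WGP k */
                mask |= 1 << (k + max_wgp_per_sh * 2); /* SQC bit of WGP k */
            }
        }
        return mask;
    }

For example, with max_wgp_per_sh = 5 and only WGP 1 inactive, this sets bits 2-3 (TCP) and bit 11 (SQC).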
1 : 0); @@ -1765,16 +5304,23 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, - adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, - adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); - + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, + adev->gfx.rlc.clear_state_gpu_addr >> 32); + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + } else { + WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, + adev->gfx.rlc.clear_state_gpu_addr >> 32); + WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); + WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + } return 0; } -void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL); @@ -1828,10 +5374,10 @@ static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev) uint32_t tmp; /* enable Save Restore Machine */ - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); + tmp = RREG32_SOC15(GC, 0, mmRLC_SRM_CNTL); tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); + WREG32_SOC15(GC, 0, mmRLC_SRM_CNTL, tmp); } static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev) @@ -1866,7 +5412,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) { int r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + adev->psp.autoload_supported) { r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) @@ -1877,6 +5424,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); } else { + if (amdgpu_sriov_vf(adev)) { + gfx_v10_0_init_csb(adev); + return 0; + } + adev->gfx.rlc.funcs->stop(adev); /* disable CG */ @@ -1921,7 +5473,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev) int ret; RLC_TABLE_OF_CONTENT *rlc_toc; - ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE, + ret = amdgpu_bo_create_reserved(adev, adev->psp.toc.size_bytes, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->gfx.rlc.rlc_toc_bo, &adev->gfx.rlc.rlc_toc_gpu_addr, @@ -1932,7 +5484,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev) } /* Copy toc from psp sos fw to rlc toc buffer */ - memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size); + memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc.start_addr, adev->psp.toc.size_bytes); rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf; while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) && @@ -1948,7 +5500,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev) rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4; rlc_toc++; - }; + } return 0; } @@ -2373,11 +5925,12 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
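The CSIB programming in the init_csb hunk above follows the driver's usual 64-bit GPU-address split: the high word is the address shifted down 32 bits, the low word is masked to a dword boundary. A minimal sketch (lower_32_bits()/upper_32_bits() are the stock kernel helpers):

    u64 csb = adev->gfx.rlc.clear_state_gpu_addr;
    u32 hi = upper_32_bits(csb);   /* bits [63:32] */
    u32 lo = csb & 0xfffffffc;     /* bits [31:2]; the low two bits must be clear */

Whichever of WREG32_SOC15()/WREG32_SOC15_RLC() then applies, per the GC 10.1.2 special case above, where the writes have to be routed through the RLC.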
0 : 1); - if (!enable) { - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; + + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); + } else { + WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); } - WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); for (i = 0; i < adev->usec_timeout; i++) { if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) @@ -2445,7 +5998,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2458,6 +6011,14 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI, upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); + WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_ADDR, 0); + + for (i = 0; i < pfp_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_DATA, + le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); + return 0; } @@ -2515,7 +6076,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -2527,6 +6088,14 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI, upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr)); + WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_ADDR, 0); + + for (i = 0; i < ce_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_DATA, + le32_to_cpup(fw_data + ce_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); + return 0; } @@ -2584,7 +6153,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2596,6 +6165,14 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI, upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); + WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_ADDR, 0); + + for (i = 0; i < me_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_DATA, + le32_to_cpup(fw_data + me_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); + return 0; } @@ -2692,18 +6269,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_commit(ring); /* submit cs packet to copy state 0 to next available state */ - ring = &adev->gfx.gfx_ring[1]; - r = amdgpu_ring_alloc(ring, 2); - if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); - return r; - } - - amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); - amdgpu_ring_write(ring, 0); + if (adev->gfx.num_gfx_rings > 1) { + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + r = amdgpu_ring_alloc(ring, 2); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } - amdgpu_ring_commit(ring); + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_commit(ring); + } return 0; } @@ 
-2723,23 +6302,42 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, { u32 tmp; - tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL); - if (ring->use_doorbell) { - tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, - DOORBELL_OFFSET, ring->doorbell_index); - tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, - DOORBELL_EN, 1); - } else { - tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, - DOORBELL_EN, 0); - } - WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); - tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, - DOORBELL_RANGE_LOWER, ring->doorbell_index); - WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); + if (!amdgpu_async_gfx_ring) { + tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL); + if (ring->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 1); + } else { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 0); + } + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); + } + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, + DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index); + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); + + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, + CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_Sienna_Cichlid_MASK); + break; + default: + tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, + DOORBELL_RANGE_LOWER, ring->doorbell_index); + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); - WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, - CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, + CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); + break; + } } static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) @@ -2800,39 +6398,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); /* Init gfx ring 1 for pipe 1 */ - mutex_lock(&adev->srbm_mutex); - gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); - ring = &adev->gfx.gfx_ring[1]; - rb_bufsz = order_base_2(ring->ring_size / 8); - tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - /* Initialize the ring buffer's write pointers */ - ring->wptr = 0; - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); - /* Set the wb address wether it's enabled or not */ - rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); - WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & - CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); - wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, - lower_32_bits(wptr_gpu_addr)); - WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, - upper_32_bits(wptr_gpu_addr)); - - mdelay(1); - WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp); - - rb_addr = ring->gpu_addr >> 8; - WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr); - WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr)); - WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); - - gfx_v10_0_cp_gfx_set_doorbell(adev, 
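REG_SET_FIELD, used heavily in the doorbell hunk above, is a mask-and-shift read-modify-write helper. Conceptually (the real macro in the soc15 headers expands per-register mask/shift tokens, so treat this as a sketch rather than the literal definition):

    /* REG_SET_FIELD(val, REG, FIELD, x) behaves like:
     *   (val & ~REG__FIELD_MASK) | ((x << REG__FIELD__SHIFT) & REG__FIELD_MASK)
     */
    tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
                        DOORBELL_RANGE_LOWER, ring->doorbell_index);

The *_Sienna_Cichlid field names select the same logical field in the GC 10.3 register layout; the switch exists only because the generated mask/shift constants differ between layouts.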
ring);
-	mutex_unlock(&adev->srbm_mutex);
-
+	if (adev->gfx.num_gfx_rings > 1) {
+		mutex_lock(&adev->srbm_mutex);
+		gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+		/* maximum supported gfx ring is 2 */
+		ring = &adev->gfx.gfx_ring[1];
+		rb_bufsz = order_base_2(ring->ring_size / 8);
+		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+		WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+		/* Initialize the ring buffer's write pointers */
+		ring->wptr = 0;
+		WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+		/* Set the wb address whether it's enabled or not */
+		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+		WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+		WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+			     lower_32_bits(wptr_gpu_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+			     upper_32_bits(wptr_gpu_addr));
+
+		mdelay(1);
+		WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+		rb_addr = ring->gpu_addr >> 8;
+		WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+		WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+		gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+		mutex_unlock(&adev->srbm_mutex);
+	}
 	/* Switch to pipe 0 */
 	mutex_lock(&adev->srbm_mutex);
 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -2851,16 +6451,38 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
-
 	if (enable) {
-		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
+		switch (adev->ip_versions[GC_HWIP][0]) {
+		case IP_VERSION(10, 3, 0):
+		case IP_VERSION(10, 3, 2):
+		case IP_VERSION(10, 3, 1):
+		case IP_VERSION(10, 3, 4):
+		case IP_VERSION(10, 3, 5):
+		case IP_VERSION(10, 3, 3):
+			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
+			break;
+		default:
+			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
+			break;
+		}
 	} else {
-		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
-			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
-			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
-		for (i = 0; i < adev->gfx.num_compute_rings; i++)
-			adev->gfx.compute_ring[i].sched.ready = false;
+		switch (adev->ip_versions[GC_HWIP][0]) {
+		case IP_VERSION(10, 3, 0):
+		case IP_VERSION(10, 3, 2):
+		case IP_VERSION(10, 3, 1):
+		case IP_VERSION(10, 3, 4):
+		case IP_VERSION(10, 3, 5):
+		case IP_VERSION(10, 3, 3):
+			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
+				     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+				      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+			break;
+		default:
+			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
+				     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+				      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+			break;
+		}
 		adev->gfx.kiq.ring.sched.ready = false;
 	}
 	udelay(50);
@@ -2906,7 +6528,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_emu_mode == 1)
-		adev->nbio.funcs->hdp_flush(adev, NULL);
+		adev->hdp.funcs->flush_hdp(adev, NULL);
 
 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -2942,12 +6564,29 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 
 	/* tell RLC which is KIQ queue */
-	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
-	tmp &= 0xffffff00;
-	tmp |= (ring->me << 5) | (ring->pipe << 3) |
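One detail in the ring-1 bring-up above is worth spelling out: RB_BUFSZ is the log2 of the ring size in 8-byte units, and RB_BLKSZ is kept two steps smaller. For a hypothetical 4 KiB ring (order_base_2() comes from <linux/log2.h>):

    u32 rb_bufsz = order_base_2(ring->ring_size / 8); /* 4096 / 8 = 512 -> 9 */
    u32 tmp;

    tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
    tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); /* 7 */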
(ring->queue); - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); - tmp |= 0x80; - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); + tmp |= 0x80; + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); + break; + default: + tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); + tmp |= 0x80; + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); + break; + } } static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring) @@ -3027,6 +6666,11 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring) DOORBELL_EN, 0); mqd->cp_rb_doorbell_control = tmp; + /*if there are 2 gfx rings, set the lower doorbell range of the first ring, + *otherwise the range of the second ring will override the first ring */ + if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1) + gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ ring->wptr = 0; mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR); @@ -3092,7 +6736,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) struct v10_gfx_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.gfx_ring[0]; - if (!adev->in_gpu_reset && !adev->in_suspend) { + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); @@ -3104,7 +6748,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); - } else if (adev->in_gpu_reset) { + } else if (amdgpu_in_reset(adev)) { /* reset mqd with the backup copy */ if (adev->gfx.me.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); @@ -3146,12 +6790,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.num_gfx_rings; i++) kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); - r = amdgpu_ring_test_ring(kiq_ring); - if (r) { - DRM_ERROR("kfq enable failed\n"); - kiq_ring->sched.ready = false; - } - return r; + return amdgpu_ring_test_helper(kiq_ring); } #endif @@ -3194,6 +6833,19 @@ done: return r; } +static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } + } +} + static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3319,8 +6971,14 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; - /* activate the queue */ - mqd->cp_hqd_active = 1; + /* set static priority for a 
compute queue/ring */
+	gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
+	/* map_queues packet doesn't need to activate the queue,
+	 * so only the KIQ needs to set this field.
+	 */
+	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		mqd->cp_hqd_active = 1;
 
 	return 0;
 }
@@ -3331,6 +6989,10 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int j;
 
+	/* deactivate the queue */
+	if (amdgpu_sriov_vf(adev))
+		WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
 	/* disable wptr polling */
 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -3439,7 +7101,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v10_0_kiq_setting(ring);
 
-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -3475,7 +7137,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
 		mutex_lock(&adev->srbm_mutex);
 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3485,13 +7147,14 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
 		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
@@ -3591,23 +7254,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
-		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->sched.ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 	}
 
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		ring->sched.ready = true;
-		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
-			 i, ring->me, ring->pipe, ring->queue);
-		r = amdgpu_ring_test_ring(ring);
+		r = amdgpu_ring_test_helper(ring);
 		if (r)
-			ring->sched.ready = false;
+			return r;
 	}
 
 	return 0;
@@ -3625,18 +7281,39 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
 	/* check if mmVGT_ESGS_RING_SIZE_UMD
 	 * has been remapped to mmVGT_ESGS_RING_SIZE */
-	data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
-
-	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
-
-	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
-
-	if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
-		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(10, 3, 0):
+	case IP_VERSION(10, 3, 2):
+	case IP_VERSION(10, 3, 4):
+	case IP_VERSION(10, 3, 5):
+		data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
+		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
+		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
+
+		if (RREG32_SOC15(GC, 0,
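The queue-init paths above (KIQ and KCQ alike, plus the gfx queues earlier) share one state machine around the MQD backup; condensed, with the backup-pointer checks and locking omitted:

    if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
        /* cold init: build the MQD once, then snapshot it */
        memset(mqd, 0, sizeof(*mqd));
        gfx_v10_0_compute_mqd_init(ring);
        memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
    } else if (amdgpu_in_reset(adev)) {
        /* GPU reset: restore the snapshot, restart the ring clean */
        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
        ring->wptr = 0;
        amdgpu_ring_clear_ring(ring);
    } else {
        /* suspend/resume: the MQD BO contents survive */
        amdgpu_ring_clear_ring(ring);
    }

The atomic64_set() added above additionally zeroes the write-back copy of the write pointer, so a restored queue does not observe a stale wptr.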
mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) { + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD , data); + return true; + } else { + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data); + return false; + } + break; + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): return true; - } else { - WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); - return false; + default: + data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE); + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0); + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern); + + if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) { + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); + return true; + } else { + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); + return false; + } + break; } } @@ -3644,65 +7321,145 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* initialize cam_index to 0 * index will auto-inc after each data writting */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); - /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); - - /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); - - /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); - - /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + 
(SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */ + data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_Sienna_Cichlid) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + break; + default: + /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */ + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + 
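Every entry in the remapping tables above and below is the same three-step write; a hypothetical helper (not current amdgpu code) makes the pattern obvious:

    static void grbm_cam_map(struct amdgpu_device *adev,
                             u32 umd_reg_offset, u32 real_reg_offset)
    {
        u32 data = (umd_reg_offset << GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
                   (real_reg_offset << GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);

        WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
        WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); /* CAM index auto-increments */
    }

Each call would take SOC15_REG_OFFSET(GC, 0, mm<reg>) values, e.g. mapping mmVGT_TF_RING_SIZE_UMD onto the ASIC's real mmVGT_TF_RING_SIZE (or its *_Sienna_Cichlid variant).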
WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + + /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */ + data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) << + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | + (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) << + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); + break; + } - /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); +} - /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */ - data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); +static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev) +{ + uint32_t data; + data = RREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG); + data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; + WREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG, data); - /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */ - data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) << - GRBM_CAM_DATA__CAM_ADDR__SHIFT) | - (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) << - GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); - WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); + data = RREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG); + data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; + WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data); } static int gfx_v10_0_hw_init(void *handle) @@ -3719,15 +7476,12 @@ static int gfx_v10_0_hw_init(void *handle) * loaded firstly, so in direct type, it has to load smc ucode * here before rlc. 
*/ - r = smu_load_microcode(&adev->smu); - if (r) - return r; - - r = smu_check_fw_status(&adev->smu); - if (r) { - pr_err("SMC firmware status is not correct\n"); - return r; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_pm_load_smu_firmware(adev, NULL); + if (r) + return r; } + gfx_v10_0_disable_gpa_mode(adev); } /* if GRBM CAM not remapped, set up the remapping */ @@ -3744,12 +7498,21 @@ static int gfx_v10_0_hw_init(void *handle) * init golden registers and rlc resume may override some registers, * reconfig them here */ - gfx_v10_0_tcp_harvest(adev); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) + gfx_v10_0_tcp_harvest(adev); r = gfx_v10_0_cp_resume(adev); if (r) return r; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + gfx_v10_3_program_pbb_mode(adev); + + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + gfx_v10_3_set_power_brake_sequence(adev); + return r; } @@ -3771,7 +7534,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev) kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], PREEMPT_QUEUES, 0, 0); - return amdgpu_ring_test_ring(kiq_ring); + return amdgpu_ring_test_helper(kiq_ring); } #endif @@ -3779,20 +7542,36 @@ static int gfx_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; + uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + + if (!adev->no_hw_access) { #ifndef BRING_UP_DEBUG - if (amdgpu_async_gfx_ring) { - r = gfx_v10_0_kiq_disable_kgq(adev); - if (r) - DRM_ERROR("KGQ disable failed\n"); - } + if (amdgpu_async_gfx_ring) { + r = gfx_v10_0_kiq_disable_kgq(adev); + if (r) + DRM_ERROR("KGQ disable failed\n"); + } #endif - if (amdgpu_gfx_disable_kcq(adev)) - DRM_ERROR("KCQ disable failed\n"); + if (amdgpu_gfx_disable_kcq(adev)) + DRM_ERROR("KCQ disable failed\n"); + } + if (amdgpu_sriov_vf(adev)) { gfx_v10_0_cp_gfx_enable(adev, false); + /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { + tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); + tmp &= 0xffffff00; + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); + } else { + tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); + } + return 0; } gfx_v10_0_cp_enable(adev, false); @@ -3852,8 +7631,7 @@ static int gfx_v10_0_soft_reset(void *handle) GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | - GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK - | GRBM_STATUS__BCI_BUSY_MASK)) { + GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) { grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); @@ -3870,10 +7648,27 @@ static int gfx_v10_0_soft_reset(void *handle) /* GRBM_STATUS2 */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); - if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) - grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, - GRBM_SOFT_RESET, SOFT_RESET_RLC, - 1); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + if (REG_GET_FIELD(tmp, 
GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid)) + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, + SOFT_RESET_RLC, + 1); + break; + default: + if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, + SOFT_RESET_RLC, + 1); + break; + } if (grbm_soft_reset) { /* stop the rlc */ @@ -3907,13 +7702,30 @@ static int gfx_v10_0_soft_reset(void *handle) static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); - mutex_unlock(&adev->gfx.gpu_clock_mutex); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + break; + default: + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + } return clock; } @@ -3950,8 +7762,27 @@ static int gfx_v10_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X; + break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid; + break; + default: + break; + } + + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); gfx_v10_0_set_kiq_pm4_funcs(adev); gfx_v10_0_set_ring_funcs(adev); @@ -3994,13 +7825,35 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); - /* wait for RLC_SAFE_MODE */ - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) - break; - udelay(1); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); + + /* wait for RLC_SAFE_MODE */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid), + RLC_SAFE_MODE, CMD)) + break; + udelay(1); + } + break; + default: + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); + + /* wait for RLC_SAFE_MODE */ 
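The GOLDEN_TSC read in the clock-counter hunk above is the classic hi/lo/hi dance for sampling a 64-bit counter through two 32-bit registers; at 100 MHz the low word carries roughly every 42 seconds, so the high word is re-read to detect a wrap between the two accesses:

    clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
    clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
    hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
    if (hi_check != clock_hi) {
        /* the low word wrapped between the reads; take the newer pair */
        clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
        clock_hi = hi_check;
    }
    clock = clock_lo | ((u64)clock_hi << 32);

The preempt_disable()/preempt_enable() bracket in the hunk keeps the three reads close together in time.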
+ for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), + RLC_SAFE_MODE, CMD)) + break; + udelay(1); + } + break; } } @@ -4009,7 +7862,19 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev) uint32_t data; data = RLC_SAFE_MODE__CMD_MASK; - WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); + break; + default: + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); + break; + } } static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -4017,16 +7882,25 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t data, def; + if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) + return; + /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + /* 0 - Disable some blocks' MGCG */ + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000); + WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000); + WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000); + /* 1 - RLC_CGTT_MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); - - /* only for Vega10 & Raven1 */ - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); @@ -4048,29 +7922,32 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); } } - } else { + } else if (!enable || !(adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { /* 1 - MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); - /* 2 - disable MGLS in RLC */ + /* 2 - disable MGLS in CP */ + data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; + WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); + } + + /* 3 - disable MGLS in RLC */ data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); } - /* 3 - disable MGLS in CP */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); - if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { - data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); - } } } @@ -4079,22 +7956,34 @@ static void 
gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev, { uint32_t data, def; + if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS))) + return; + /* Enable 3D CGCG/CGLS */ - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { + if (enable) { /* write cmd to clear cgcg/cgls ov */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + /* unset CGCG override */ - data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; + /* update CGCG and CGLS override bits */ if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); + /* enable 3Dcgcg FSM(0x0000363f) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); - data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | - RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + data = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) + data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; + if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); @@ -4107,9 +7996,14 @@ static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev, } else { /* Disable CGCG/CGLS */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + /* disable cgcg, cgls should be disabled */ - data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | - RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; + /* disable cgcg and cgls in FSM */ if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); @@ -4121,25 +8015,35 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { + if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS))) + return; + + if (enable) { def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + /* unset CGCG override */ - data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; - else - data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; + /* update CGCG and CGLS override bits */ if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); /* enable cgcg FSM(0x0000363F) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); - data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | - RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + data = 0; + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) + data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); @@ -4151,20 +8055,158 @@ static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); } else { def = 
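All of these gating helpers rely on one idiom: snapshot the register into def, edit a working copy in data, and write back only if something actually changed, which skips needless MMIO (and, on SR-IOV paths, needless RLC-mediated writes). In isolation:

    def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

    if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
        data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;

    if (def != data)
        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);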
data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + /* reset CGCG/CGLS bits */ - data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) + data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) + data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + /* disable cgcg and cgls in FSM */ if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); } } +static void gfx_v10_0_update_fine_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) + return; + + if (enable) { + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + /* unset FGCG override */ + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; + /* update FGCG override bits */ + if (def != data) + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); + + def = data = RREG32_SOC15(GC, 0, mmRLC_CLK_CNTL); + /* unset RLC SRAM CLK GATER override */ + data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK; + /* update RLC SRAM CLK GATER override bits */ + if (def != data) + WREG32_SOC15(GC, 0, mmRLC_CLK_CNTL, data); + } else { + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + /* reset FGCG bits */ + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; + /* disable FGCG*/ + if (def != data) + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); + + def = data = RREG32_SOC15(GC, 0, mmRLC_CLK_CNTL); + /* reset RLC SRAM CLK GATER bits */ + data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK; + /* disable RLC SRAM CLK*/ + if (def != data) + WREG32_SOC15(GC, 0, mmRLC_CLK_CNTL, data); + } +} + +static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_device *adev) +{ + uint32_t reg_data = 0; + uint32_t reg_idx = 0; + uint32_t i; + + const uint32_t tcp_ctrl_regs[] = { + mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP12_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP12_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP12_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP12_CU1_TCP_CTRL_REG + }; + + const uint32_t tcp_ctrl_regs_nv12[] = { + mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP00_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP01_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP01_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP02_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP02_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP10_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP10_CU1_TCP_CTRL_REG, + mmCGTS_SA0_WGP11_CU0_TCP_CTRL_REG, + mmCGTS_SA0_WGP11_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP00_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP00_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP01_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP01_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP02_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP02_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP10_CU0_TCP_CTRL_REG, + mmCGTS_SA1_WGP10_CU1_TCP_CTRL_REG, + mmCGTS_SA1_WGP11_CU0_TCP_CTRL_REG, + 
mmCGTS_SA1_WGP11_CU1_TCP_CTRL_REG, + }; + + const uint32_t sm_ctlr_regs[] = { + mmCGTS_SA0_QUAD0_SM_CTRL_REG, + mmCGTS_SA0_QUAD1_SM_CTRL_REG, + mmCGTS_SA1_QUAD0_SM_CTRL_REG, + mmCGTS_SA1_QUAD1_SM_CTRL_REG + }; + + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { + for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) { + reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] + + tcp_ctrl_regs_nv12[i]; + reg_data = RREG32(reg_idx); + reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK; + WREG32(reg_idx, reg_data); + } + } else { + for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs); i++) { + reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] + + tcp_ctrl_regs[i]; + reg_data = RREG32(reg_idx); + reg_data |= CGTS_SA0_WGP00_CU0_TCP_CTRL_REG__TCPI_LS_OVERRIDE_MASK; + WREG32(reg_idx, reg_data); + } + } + + for (i = 0; i < ARRAY_SIZE(sm_ctlr_regs); i++) { + reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_QUAD0_SM_CTRL_REG_BASE_IDX] + + sm_ctlr_regs[i]; + reg_data = RREG32(reg_idx); + reg_data &= ~CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE_MASK; + reg_data |= 2 << CGTS_SA0_QUAD0_SM_CTRL_REG__SM_MODE__SHIFT; + WREG32(reg_idx, reg_data); + } +} + static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, bool enable) { amdgpu_gfx_rlc_enter_safe_mode(adev); if (enable) { + /* enable FGCG firstly*/ + gfx_v10_0_update_fine_grain_clock_gating(adev, enable); /* CGCG/CGLS should be enabled after MGCG/MGLS * === MGCG + MGLS === */ @@ -4173,6 +8215,11 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, gfx_v10_0_update_3d_clock_gating(adev, enable); /* === CGCG + CGLS === */ gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); + + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))) + gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS * === CGCG + CGLS === @@ -4182,13 +8229,14 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, gfx_v10_0_update_3d_clock_gating(adev, enable); /* === MGCG + MGLS === */ gfx_v10_0_update_medium_grain_clock_gating(adev, enable); + /* disable fgcg at last*/ + gfx_v10_0_update_fine_grain_clock_gating(adev, enable); } if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_CGCG | - AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) gfx_v10_0_enable_gui_idle_interrupt(adev, enable); @@ -4198,6 +8246,98 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 reg, data; + /* not for *_SOC15 */ + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); + else + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 
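The enable/disable ordering in gfx_v10_0_update_gfx_clock_gating() above is deliberate and symmetric; the comments ("enable FGCG firstly", "disable fgcg at last") encode the nesting:

    if (enable) {
        gfx_v10_0_update_fine_grain_clock_gating(adev, true);   /* FGCG first */
        gfx_v10_0_update_medium_grain_clock_gating(adev, true); /* MGCG/MGLS */
        gfx_v10_0_update_3d_clock_gating(adev, true);
        gfx_v10_0_update_coarse_grain_clock_gating(adev, true); /* CGCG/CGLS last */
    } else {
        gfx_v10_0_update_coarse_grain_clock_gating(adev, false);
        gfx_v10_0_update_3d_clock_gating(adev, false);
        gfx_v10_0_update_medium_grain_clock_gating(adev, false);
        gfx_v10_0_update_fine_grain_clock_gating(adev, false);  /* FGCG last */
    }

That is, coarse gating is only ever active while the finer-grained levels beneath it are already in their final state, and the whole sequence runs inside RLC safe mode via amdgpu_gfx_rlc_enter_safe_mode().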
0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0); +} + +static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) +{ + u32 data = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL); + + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + + WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data); + + /* + * CGPG enablement required and the register to program the hysteresis value + * RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG to the desired CGPG hysteresis value + * in refclk count. Note that RLC FW is modified to take 16 bits from + * RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits. + * + * The recommendation from RLC team is setting RLC_PG_DELAY_3 to 200us as part) + * of CGPG enablement starting point. + * Power/performance team will optimize it and might give a new value later. + */ + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + break; + case IP_VERSION(10, 3, 3): + data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + break; + default: + break; + } + } +} + +static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable) +{ + amdgpu_gfx_rlc_enter_safe_mode(adev); + + gfx_v10_cntl_power_gating(adev, enable); + + amdgpu_gfx_rlc_exit_safe_mode(adev); +} + static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, .set_safe_mode = gfx_v10_0_set_safe_mode, @@ -4208,22 +8348,50 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .resume = gfx_v10_0_rlc_resume, .stop = gfx_v10_0_rlc_stop, .reset = gfx_v10_0_rlc_reset, - .start = gfx_v10_0_rlc_start + .start = gfx_v10_0_rlc_start, + .update_spm_vmid = gfx_v10_0_update_spm_vmid, +}; + +static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = { + .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, + .set_safe_mode = gfx_v10_0_set_safe_mode, + .unset_safe_mode = gfx_v10_0_unset_safe_mode, + .init = gfx_v10_0_rlc_init, + .get_csb_size = gfx_v10_0_get_csb_size, + .get_csb_buffer = gfx_v10_0_get_csb_buffer, + .resume = gfx_v10_0_rlc_resume, + .stop = gfx_v10_0_rlc_stop, + .reset = gfx_v10_0_rlc_reset, + .start = gfx_v10_0_rlc_start, + .update_spm_vmid = gfx_v10_0_update_spm_vmid, + .sriov_wreg = gfx_v10_sriov_wreg, + .sriov_rreg = gfx_v10_sriov_rreg, + .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range, }; static int gfx_v10_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } else - amdgpu_gfx_off_ctrl(adev, true); + bool enable = (state == AMD_PG_STATE_GATE); + + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_gfx_off_ctrl(adev, enable); + break; + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): + gfx_v10_cntl_pg(adev, enable); + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -4236,12 +8404,21 @@ static int gfx_v10_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): gfx_v10_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4254,13 +8431,18 @@ static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int data; + /* AMD_CG_SUPPORT_GFX_FGCG */ + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); + if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_FGCG; + /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) *flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -4269,17 +8451,17 @@ static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; @@ -4397,15 +8579,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, control |= ib->length_dw | (vmid << 24); - if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { + if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= 
INDIRECT_BUFFER_PRE_ENB(1); if (flags & AMDGPU_IB_PREEMPTED) control |= INDIRECT_BUFFER_PRE_RESUME(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v10_0_ring_emit_de_meta(ring, - flags & AMDGPU_IB_PREEMPTED ? true : false); + (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); } amdgpu_ring_write(ring, header); @@ -4457,14 +8639,9 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { - struct amdgpu_device *adev = ring->adev; bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* Interrupt not work fine on GFX10.1 model yet. Use fallback instead */ - if (adev->pdev->device == 0x50) - int_sel = false; - /* RELEASE_MEM - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | @@ -4548,15 +8725,14 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0); } -static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) +static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, + uint32_t flags) { uint32_t dw2 = 0; - if (amdgpu_mcbp) + if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev)) gfx_v10_0_ring_emit_ce_meta(ring, - flags & AMDGPU_IB_PREEMPTED ? true : false); - - gfx_v10_0_ring_emit_tmz(ring, true); + (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ if (flags & AMDGPU_HAVE_CTX_SWITCH) { @@ -4616,12 +8792,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; + } /* assert preemption condition */ amdgpu_ring_set_preempt_cond_exec(ring, false); @@ -4632,6 +8813,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ++ring->trail_seq); amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + /* poll the trailing fence */ for (i = 0; i < adev->usec_timeout; i++) { if (ring->trail_seq == @@ -4714,13 +8897,17 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) sizeof(de_payload) >> 2); } -static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) +static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, + bool secure) { + uint32_t v = secure ? FRAME_TMZ : 0; + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); - amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ + amdgpu_ring_write(ring, v | FRAME_CMD(start ? 
0 : 1)); } -static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; @@ -4731,9 +8918,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -4783,6 +8970,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } +static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, + unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); +} + static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -4809,16 +9009,16 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(cp_int_cntl_reg); + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 0); - WREG32(cp_int_cntl_reg, cp_int_cntl); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(cp_int_cntl_reg); + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 1); - WREG32(cp_int_cntl_reg, cp_int_cntl); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); break; default: break; @@ -4862,16 +9062,16 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; default: break; @@ -4985,6 +9185,7 @@ static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + break; default: break; } @@ -5066,20 +9267,20 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, GENERIC2_INT_ENABLE, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - tmp = RREG32(target); + tmp = RREG32_SOC15_IP(GC, target); tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, GENERIC2_INT_ENABLE, 0); - WREG32(target, tmp); + WREG32_SOC15_IP(GC, target, tmp); } else { tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, GENERIC2_INT_ENABLE, 1); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - tmp = RREG32(target); + tmp = RREG32_SOC15_IP(GC, target); tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, GENERIC2_INT_ENABLE, 1); - WREG32(target, tmp); + WREG32_SOC15_IP(GC, target, tmp); } break; default: @@ -5106,6 +9307,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring) +{ + const unsigned int gcr_cntl = + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); + + /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); + amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ + amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ + amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ +} + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { .name = "gfx_v10_0", .early_init = gfx_v10_0_early_init, @@ -5153,7 +9377,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ - 2, /* SWITCH_BUFFER */ + 2 + /* SWITCH_BUFFER */ + 8, /* gfx_v10_0_emit_mem_sync */ .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -5170,10 +9395,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec, .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec, .preempt_ib = gfx_v10_0_ring_preempt_ib, - .emit_tmz = gfx_v10_0_ring_emit_tmz, + .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v10_0_ring_soft_recovery, + .emit_mem_sync = gfx_v10_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -5193,7 +9420,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ - 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ + 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ + 8, /* gfx_v10_0_emit_mem_sync */ .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */ .emit_ib = gfx_v10_0_ring_emit_ib_compute, .emit_fence = gfx_v10_0_ring_emit_fence, @@ -5208,6 +9436,7 @@ static const struct 
amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .emit_mem_sync = gfx_v10_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { @@ -5291,12 +9520,21 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs; break; + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): + adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov; + break; default: break; } @@ -5330,17 +9568,22 @@ static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device * static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) { - u32 data, wgp_bitmask; - data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); + u32 disabled_mask = + ~amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); + u32 efuse_setting = 0; + u32 vbios_setting = 0; + + efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); + efuse_setting &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; + efuse_setting >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; + vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); + vbios_setting &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; + vbios_setting >>= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; - wgp_bitmask = - amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); + disabled_mask |= efuse_setting | vbios_setting; - return (~data) & wgp_bitmask; + return (~disabled_mask); } static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) @@ -5376,6 +9619,11 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + bitmap = i * adev->gfx.config.max_sh_per_se + j; + if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) && + ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) + continue; mask = 1; ao_bitmap = 0; counter = 0; @@ -5410,6 +9658,72 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, return 0; } +static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev) +{ + uint32_t efuse_setting, vbios_setting, disabled_sa, max_sa_mask; + + efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); + efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; + efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE); + vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK; + vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + max_sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines); + disabled_sa 
= efuse_setting | vbios_setting; + disabled_sa &= max_sa_mask; + + return disabled_sa; +} + +static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev) +{ + uint32_t max_sa_per_se, max_sa_per_se_mask, max_shader_engines; + uint32_t disabled_sa_mask, se_index, disabled_sa_per_se; + + disabled_sa_mask = gfx_v10_3_get_disabled_sa(adev); + + max_sa_per_se = adev->gfx.config.max_sh_per_se; + max_sa_per_se_mask = (1 << max_sa_per_se) - 1; + max_shader_engines = adev->gfx.config.max_shader_engines; + + for (se_index = 0; max_shader_engines > se_index; se_index++) { + disabled_sa_per_se = disabled_sa_mask >> (se_index * max_sa_per_se); + disabled_sa_per_se &= max_sa_per_se_mask; + if (disabled_sa_per_se == max_sa_per_se_mask) { + WREG32_FIELD15(GC, 0, PA_SC_ENHANCE_3, FORCE_PBB_WORKLOAD_MODE_TO_ZERO, 1); + break; + } + } +} + +static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev) +{ + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, + (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) | + (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) | + (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT)); + + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, + (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) | + (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) | + (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) | + (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT)); + + WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid, + (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) | + (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) | + (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT)); + + WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL); + + WREG32_SOC15(GC, 0, mmDIDT_IND_DATA, + (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT)); +} + const struct amdgpu_ip_block_version gfx_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7f0a63628c43..6a8dadea40f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev) static void gfx_v6_0_constants_init(struct amdgpu_device *adev) { u32 gb_addr_config = 0; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; @@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; @@ -1895,6 +1894,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * gfx_v6_0_ring_test_ib - basic ring IB test * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Allocate an IB and execute it on the gfx ring (SI). * Provides a basic gfx ring test to verify that IBs are working. 
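[Editor's sketch] The ring IB tests touched in the hunks below (and again in gfx_v7_0.c and gfx_v8_0.c further down) all follow the same handshake: seed a scratch location with 0xCAFEDEAD, submit a small IB that overwrites it with a magic value, then poll until the write lands or a timeout expires. A self-contained plain-C sketch of that pattern; the scratch variable and the synchronous "submit" are stand-ins for illustration only, not amdgpu API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEED  0xCAFEDEADu
#define MAGIC 0xDEADBEEFu

static uint32_t scratch;                 /* stands in for a scratch register */

/* Stand-in for IB submission: the real GPU write lands asynchronously. */
static void submit_ib_write(uint32_t val)
{
	scratch = val;
}

static bool ring_ib_test(void)
{
	unsigned int timeout = 100;

	scratch = SEED;                  /* known-bad seed value */
	submit_ib_write(MAGIC);          /* "execute" the IB */

	while (timeout--)                /* poll, as the real tests do */
		if (scratch == MAGIC)
			return true;
	return false;                    /* IB never executed */
}

int main(void)
{
	printf("IB test %s\n", ring_ib_test() ? "passed" : "failed");
	return 0;
}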
@@ -1915,7 +1915,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -1951,7 +1952,6 @@ err1: static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; if (enable) { WREG32(mmCP_ME_CNTL, 0); } else { @@ -1959,10 +1959,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); WREG32(mmSCRATCH_UMSK, 0); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; } udelay(50); } @@ -3031,6 +3027,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -3069,7 +3066,8 @@ static int gfx_v6_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + GFX6_NUM_COMPUTE_RINGS); adev->gfx.funcs = &gfx_v6_0_gfx_funcs; adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs; gfx_v6_0_set_ring_funcs(adev); @@ -3115,7 +3113,9 @@ static int gfx_v6_0_sw_init(void *handle) ring->ring_obj = NULL; sprintf(ring->name, "gfx"); r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -3137,7 +3137,8 @@ static int gfx_v6_0_sw_init(void *handle) sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -3467,6 +3468,18 @@ static int gfx_v6_0_set_powergating_state(void *handle, return 0; } +static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ +} + static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { .name = "gfx_v6_0", .early_init = gfx_v6_0_early_init, @@ -3497,7 +3510,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ - 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */ + 3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */ + 5, /* SURFACE_SYNC */ .emit_ib_size = 6, /* 
gfx_v6_0_ring_emit_ib */ .emit_ib = gfx_v6_0_ring_emit_ib, .emit_fence = gfx_v6_0_ring_emit_fence, @@ -3508,6 +3522,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { .insert_nop = amdgpu_ring_insert_nop, .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, .emit_wreg = gfx_v6_0_ring_emit_wreg, + .emit_mem_sync = gfx_v6_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { @@ -3521,7 +3536,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { 5 + 5 + /* hdp flush / invalidate */ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */ - 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ + 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ + 5, /* SURFACE_SYNC */ .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ .emit_ib = gfx_v6_0_ring_emit_ib, .emit_fence = gfx_v6_0_ring_emit_fence, @@ -3531,6 +3547,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { .test_ib = gfx_v6_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .emit_wreg = gfx_v6_0_ring_emit_wreg, + .emit_mem_sync = gfx_v6_0_emit_mem_sync, }; static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d92e92e5d50b..37b4a3db6360 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1580,10 +1580,10 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @se_num: shader engine to address * @sh_num: sh block to address + * @instance: Certain registers are instanced per SE or SH. + * 0xffffffff means broadcast to all SEs or SHs (CIK). * - * Select which SE, SH combinations to address. Certain - * registers are instanced per SE or SH. 0xffffffff means - * broadcast to all SEs or SHs (CIK). + * Select which SE, SH combinations to address. */ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) @@ -1779,8 +1779,6 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, * gfx_v7_0_setup_rb - setup the RBs on the asic * * @adev: amdgpu_device pointer - * @se_num: number of SEs (shader engines) for the asic - * @sh_per_se: number of SH blocks per SE for the asic * * Configures per-SE/SH RB registers (CIK). 
*/ @@ -1841,6 +1839,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } +#define DEFAULT_SH_MEM_BASES (0x6000) /** * gfx_v7_0_init_compute_vmid - gart enable * @@ -1849,9 +1848,6 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) * Initialize compute vmid sh_mem registers * */ -#define DEFAULT_SH_MEM_BASES (0x6000) -#define FIRST_COMPUTE_VMID (8) -#define LAST_COMPUTE_VMID (16) static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) { int i; @@ -1869,7 +1865,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT; mutex_lock(&adev->srbm_mutex); - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { cik_srbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32(mmSH_MEM_CONFIG, sh_mem_config); @@ -1881,8 +1877,8 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + access. These should be enabled by FW for target VMIDs. */ + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); WREG32(amdgpu_gds_reg_offset[i].gws, 0); @@ -1900,7 +1896,7 @@ static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev) * the driver can enable them for graphics. VMID0 should maintain * access so that HWS firmware can save/restore entries. */ - for (vmid = 1; vmid < 16; vmid++) { + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); @@ -2062,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * * Set up the number and offset of the CP scratch registers. - * NOTE: use of CP scratch registers is a legacy inferface and + * NOTE: use of CP scratch registers is a legacy interface and * is not used by default on newer asics (r6xx+). On newer asics, * memory buffers are used for fences rather than scratch regs. */ @@ -2076,7 +2072,6 @@ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) /** * gfx_v7_0_ring_test_ring - basic gfx ring test * - * @adev: amdgpu_device pointer * @ring: amdgpu_ring structure holding ring information * * Allocate a scratch register and write to it using the gfx ring (CIK). @@ -2121,10 +2116,9 @@ error_free_scratch: } /** - * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp + * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp * - * @adev: amdgpu_device pointer - * @ridx: amdgpu ring index + * @ring: amdgpu_ring structure holding ring information * * Emits an hdp flush on the cp. */ @@ -2173,10 +2167,12 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) /** * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring * - * @adev: amdgpu_device pointer - * @fence: amdgpu fence object + * @ring: amdgpu_ring structure holding ring information + * @addr: address + * @seq: sequence number + * @flags: fence related flags * - * Emits a fence sequnce number on the gfx ring and flushes + * Emits a fence sequence number on the gfx ring and flushes * GPU caches. 
*/ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, @@ -2214,10 +2210,12 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, /** * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring * - * @adev: amdgpu_device pointer - * @fence: amdgpu fence object + * @ring: amdgpu_ring structure holding ring information + * @addr: address + * @seq: sequence number + * @flags: fence related flags * - * Emits a fence sequnce number on the compute ring and flushes + * Emits a fence sequence number on the compute ring and flushes * GPU caches. */ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, @@ -2244,15 +2242,17 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, * IB stuff */ /** - * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring + * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring * * @ring: amdgpu_ring structure holding ring information + * @job: job to retrieve vmid from * @ib: amdgpu indirect buffer object + * @flags: options (AMDGPU_HAVE_CTX_SWITCH) * * Emits an DE (drawing engine) or CE (constant engine) IB * on the gfx ring. IBs are usually generated by userspace * acceleration drivers and submitted to the kernel for - * sheduling on the ring. This function schedules the IB + * scheduling on the ring. This function schedules the IB * on the gfx ring for execution by the GPU. */ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, @@ -2344,6 +2344,7 @@ static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) * gfx_v7_0_ring_test_ib - basic ring IB test * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Allocate an IB and execute it on the gfx ring (CIK). * Provides a basic gfx ring test to verify that IBs are working. @@ -2364,7 +2365,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -2400,7 +2402,7 @@ err1: /* * CP. - * On CIK, gfx and compute now have independant command processors. + * On CIK, gfx and compute now have independent command processors. 
* * GFX * Gfx consists of a single ring and can process both gfx jobs and @@ -2431,15 +2433,12 @@ err1: */ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; - - if (enable) { + if (enable) WREG32(mmCP_ME_CNTL, 0); - } else { - WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - } + else + WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | + CP_ME_CNTL__PFP_HALT_MASK | + CP_ME_CNTL__CE_HALT_MASK)); udelay(50); } @@ -2631,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) ring->wptr = 0; WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); @@ -2700,15 +2699,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) */ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - - if (enable) { + if (enable) WREG32(mmCP_MEC_CNTL, 0); - } else { - WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; - } + else + WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); udelay(50); } @@ -2990,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; - /* set the wb address wether it's enabled or not */ + /* set the wb address whether it's enabled or not */ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; mqd->cp_hqd_pq_rptr_report_addr_hi = @@ -3045,7 +3040,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, mqd->cp_hqd_active = 1; } -int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd) +static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd) { uint32_t tmp; uint32_t mqd_reg; @@ -3201,9 +3196,9 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) } /** - * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP + * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP * - * @ring: the ring to emmit the commands to + * @ring: the ring to emit the commands to * * Sync the command pipeline with the PFP. E.g. wait for everything * to be completed. 
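[Editor's sketch] The update_spm_vmid() callbacks added just below (and in gfx_v8_0.c later in this series) are all the same read-modify-write on a single register field. A minimal sketch of that idiom, with a plain variable standing in for the RREG32/WREG32 accessors and hypothetical mask/shift constants:

#include <stdint.h>

#define SPM_VMID_MASK  0xfu              /* hypothetical field mask */
#define SPM_VMID_SHIFT 0u                /* hypothetical field shift */

static uint32_t rlc_spm_vmid;            /* stands in for mmRLC_SPM_VMID */

static void update_spm_vmid(unsigned int vmid)
{
	uint32_t data = rlc_spm_vmid;                      /* RREG32 */

	data &= ~(SPM_VMID_MASK << SPM_VMID_SHIFT);        /* clear the field */
	data |= (vmid & SPM_VMID_MASK) << SPM_VMID_SHIFT;  /* insert new vmid */
	rlc_spm_vmid = data;                               /* WREG32 */
}

The rlc_init hunks seed the field with 0xf, which effectively parks SPM on the highest VMID until a process claims SPM access.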
@@ -3225,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 4); /* poll interval */ if (usepfp) { - /* synce CE with ME to prevent CE fetch CEIB before context switch done */ + /* sync CE with ME to prevent CE fetch CEIB before context switch done */ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); @@ -3242,7 +3237,9 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) /** * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP * - * @adev: amdgpu_device pointer + * @ring: amdgpu_ring pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using the CP (CIK). @@ -3346,6 +3343,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -3570,6 +3571,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp, tmp2; @@ -4185,6 +4198,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -4221,7 +4235,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, - .start = gfx_v7_0_rlc_start + .start = gfx_v7_0_rlc_start, + .update_spm_vmid = gfx_v7_0_update_spm_vmid }; static int gfx_v7_0_early_init(void *handle) @@ -4229,7 +4244,8 @@ static int gfx_v7_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); adev->gfx.funcs = &gfx_v7_0_gfx_funcs; adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; gfx_v7_0_set_ring_funcs(adev); @@ -4258,7 +4274,7 @@ static int gfx_v7_0_late_init(void *handle) static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; @@ -4335,10 +4351,14 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = 
adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { @@ -4418,7 +4438,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -4490,7 +4511,9 @@ static int gfx_v7_0_sw_init(void *handle) ring->ring_obj = NULL; sprintf(ring->name, "gfx"); r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -4980,6 +5003,32 @@ static int gfx_v7_0_set_powergating_state(void *handle, return 0; } +static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ +} + +static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ + amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ +} + static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, @@ -5012,7 +5061,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ - 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/ + 3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/ + 5, /* SURFACE_SYNC */ .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */ .emit_ib = gfx_v7_0_ring_emit_ib_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx, @@ -5027,6 +5077,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, .emit_wreg = gfx_v7_0_ring_emit_wreg, .soft_recovery = gfx_v7_0_ring_soft_recovery, + .emit_mem_sync = gfx_v7_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { @@ -5043,7 +5094,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { 5 + /* hdp invalidate */ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */ - 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ + 7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ + 7, /* gfx_v7_0_emit_mem_sync_compute */ .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */ .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, @@ -5056,6 +5108,7 @@ static const struct amdgpu_ring_funcs 
gfx_v7_0_ring_funcs_compute = { .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v7_0_ring_emit_wreg, + .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute, }; static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5161,15 +5214,6 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) cu_info->lds_size = 64; } -const struct amdgpu_ip_block_version gfx_v7_0_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_GFX, - .major = 7, - .minor = 0, - .rev = 0, - .funcs = &gfx_v7_0_ip_funcs, -}; - const struct amdgpu_ip_block_version gfx_v7_1_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index 6fb9c1524691..eedce7d007f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -24,7 +24,6 @@ #ifndef __GFX_V7_0_H__ #define __GFX_V7_0_H__ -extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block; extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block; extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block; extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 52a647d7022d..e0302c23e9a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" +#include "amdgpu_ring.h" #include "vi.h" #include "vi_structs.h" #include "vid.h" @@ -729,8 +730,13 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev); static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring); static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring); +#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x0000007fL +#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x00000000L + static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) { + uint32_t data; + switch (adev->asic_type) { case CHIP_TOPAZ: amdgpu_device_program_register_sequence(adev, @@ -790,11 +796,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) amdgpu_device_program_register_sequence(adev, polaris10_golden_common_all, ARRAY_SIZE(polaris10_golden_common_all)); - WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); - if (adev->pdev->revision == 0xc7 && + data = RREG32_SMC(ixCG_ACLK_CNTL); + data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK; + data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT; + WREG32_SMC(ixCG_ACLK_CNTL, data); + if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) && ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) || - (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) { + (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) { amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); } @@ -888,7 +897,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 16, &ib); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -1318,6 +1328,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if 
(adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1338,21 +1352,22 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; + if (mec_hpd_size) { + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + return r; + } - r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.mec.hpd_eop_obj, - &adev->gfx.mec.hpd_eop_gpu_addr, - (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - - memset(hpd, 0, mec_hpd_size); + memset(hpd, 0, mec_hpd_size); - amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + } return 0; } @@ -1546,7 +1561,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; @@ -1677,7 +1693,7 @@ fail: static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; int ret; @@ -1817,10 +1833,14 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { @@ -1884,6 +1904,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int r; unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -1903,9 +1924,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT; /* type-2 packets are deprecated on MEC, use type-3 instead */ - r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + hw_prio, NULL); if (r) return r; @@ -2009,7 +2032,8 @@ static int gfx_v8_0_sw_init(void *handle) } r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, - AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -3236,7 +3260,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) dev_warn(adev->dev, "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n", adev->asic_type); - /* fall through */ + fallthrough; case CHIP_CARRIZO: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | @@ -3663,6 +3687,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } +#define DEFAULT_SH_MEM_BASES (0x6000) /** * gfx_v8_0_init_compute_vmid - gart enable * @@ -3671,9 +3696,6 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) * Initialize compute vmid sh_mem registers * */ -#define DEFAULT_SH_MEM_BASES (0x6000) -#define FIRST_COMPUTE_VMID (8) -#define LAST_COMPUTE_VMID (16) static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) { int i; @@ -3696,7 +3718,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) SH_MEM_CONFIG__PRIVATE_ATC_MASK; mutex_lock(&adev->srbm_mutex); - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { vi_srbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32(mmSH_MEM_CONFIG, sh_mem_config); @@ -3709,7 +3731,7 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) /* Initialize all compute VMIDs to have no GDS, GWS, or OA acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); WREG32(amdgpu_gds_reg_offset[i].gws, 0); @@ -3727,7 +3749,7 @@ static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev) * the driver can enable them for graphics. VMID0 should maintain * access so that HWS firmware can save/restore entries. 
*/ - for (vmid = 1; vmid < 16; vmid++) { + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); @@ -4112,7 +4134,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; u32 tmp = RREG32(mmCP_ME_CNTL); if (enable) { @@ -4123,8 +4144,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; } WREG32(mmCP_ME_CNTL, tmp); udelay(50); @@ -4312,14 +4331,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - if (enable) { WREG32(mmCP_MEC_CNTL, 0); } else { WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; adev->gfx.kiq.ring.sched.ready = false; } udelay(50); @@ -4422,6 +4437,19 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) return r; } +static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } + } +} + static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -4545,9 +4573,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) /* defaults */ mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR); mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR); - mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); - mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO); mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI); mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET); @@ -4559,13 +4584,20 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM); mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES); - /* activate the queue */ - mqd->cp_hqd_active = 1; + /* set static priority for a queue/ring */ + gfx_v8_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + + /* map_queues packet doesn't need activate the queue, + * so only kiq need set this field. 
+ */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + mqd->cp_hqd_active = 1; return 0; } -int gfx_v8_0_mqd_commit(struct amdgpu_device *adev, +static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev, struct vi_mqd *mqd) { uint32_t mqd_reg; @@ -4610,7 +4642,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) gfx_v8_0_kiq_setting(ring); - if (adev->in_gpu_reset) { /* for GPU_RESET case */ + if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); @@ -4647,7 +4679,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) struct vi_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->in_gpu_reset && !adev->in_suspend) { + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -4659,7 +4691,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); - } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ + } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); @@ -5035,7 +5067,7 @@ static int gfx_v8_0_pre_soft_reset(void *handle) gfx_v8_0_cp_compute_enable(adev, false); } - return 0; + return 0; } static int gfx_v8_0_soft_reset(void *handle) @@ -5247,6 +5279,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -5272,7 +5305,8 @@ static int gfx_v8_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); adev->gfx.funcs = &gfx_v8_0_gfx_funcs; gfx_v8_0_set_ring_funcs(adev); gfx_v8_0_set_irq_funcs(adev); @@ -5320,10 +5354,9 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - if (((adev->asic_type == CHIP_POLARIS11) || + if ((adev->asic_type == CHIP_POLARIS11) || (adev->asic_type == CHIP_POLARIS12) || - (adev->asic_type == CHIP_VEGAM)) && - adev->powerplay.pp_funcs->set_powergating_by_smu) + (adev->asic_type == CHIP_VEGAM)) /* Send msg to SMU via Powerplay */ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable); @@ -5587,6 +5620,24 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) } } +static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(mmRLC_SPM_VMID); + else + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & 
RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_NO_KIQ(mmRLC_SPM_VMID, data); + else + WREG32(mmRLC_SPM_VMID, data); +} + static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, .set_safe_mode = gfx_v8_0_set_safe_mode, @@ -5598,7 +5649,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, - .start = gfx_v8_0_rlc_start + .start = gfx_v8_0_rlc_start, + .update_spm_vmid = gfx_v8_0_update_spm_vmid }; static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -5838,8 +5890,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_CG, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) { @@ -5860,8 +5911,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_MG, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } return 0; @@ -5890,8 +5940,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_CG, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) { @@ -5910,8 +5959,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_3D, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) { @@ -5932,8 +5980,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_MG, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { @@ -5948,8 +5995,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_RLC, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { @@ -5963,8 +6009,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, PP_BLOCK_GFX_CP, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } return 0; @@ -6092,7 +6137,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v8_0_ring_emit_de_meta(ring); } @@ -6234,104 +6279,6 @@ 
static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } -static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} -static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v8_0_hqd_set_priority(adev, ring, acquire); - gfx_v8_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) @@ -6444,7 +6391,8 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne ring->ring[offset] = (ring->ring_size >> 2) - offset + cur; } -static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; @@ -6455,9 +6403,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -6771,7 +6719,8 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev, return 0; } -static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data) +static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data, + bool from_wq) { u32 enc, se_id, sh_id, cu_id; char type[20]; @@ -6809,7 +6758,7 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data) * or from BH in which case we can access SQ_EDC_INFO * instance */ - if (in_task()) { + if (from_wq) { mutex_lock(&adev->grbm_idx_mutex); gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id); @@ -6847,7 +6796,7 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work); struct sq_work *sq_work = container_of(work, struct sq_work, work); - gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data); + gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true); } static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, @@ -6862,7 +6811,7 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, * just print whatever info is possible directly from the ISR. 
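In the hunk above, `gfx_v8_0_ring_emit_rreg()` stops hard-coding the single shared `adev->virt.reg_val_offs` writeback slot and takes the offset as a parameter, so every in-flight KIQ register read gets its own destination. A rough user-space model of why that matters (the writeback page and the toy allocator are stand-ins):

#include <stdint.h>

static uint32_t wb_page[256]; /* model of the GPU writeback buffer */
static uint32_t wb_next;

/* per-request slot allocation instead of one global offset */
static uint32_t wb_get(void) { return wb_next++ % 256; }

static uint32_t kiq_read_reg(uint32_t reg)
{
	uint32_t offs = wb_get(); /* private destination for this read */

	/* emit_rreg(ring, reg, offs) would make the CP copy the register
	 * into wb_page[offs]; with one shared offset, two concurrent
	 * readers could clobber each other's result before consuming it.
	 */
	wb_page[offs] = reg; /* placeholder for the hardware write */

	return wb_page[offs];
}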
*/ if (work_pending(&adev->gfx.sq_work.work)) { - gfx_v8_0_parse_sq_irq(adev, ih_data); + gfx_v8_0_parse_sq_irq(adev, ih_data, false); } else { adev->gfx.sq_work.ih_data = ih_data; schedule_work(&adev->gfx.sq_work.work); @@ -6871,6 +6820,94 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, return 0; } +static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA | + PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ +} + +static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA | + PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ + amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ +} + + +/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ +#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f +static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + uint32_t val; + uint32_t wcl_cs_reg; + + val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0; + break; + case 1: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1; + break; + case 2: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2; + break; + case 3: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3; + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); + +} + +#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff +static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + /* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit + * number of gfx waves. Setting 5 bit will make sure gfx only gets + * around 25% of gpu resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val); + + /* Restrict waves for normal/low priority compute queues as well + * to get best QoS for high priority compute jobs. + * + * amdgpu controls only 1st ME(0-3 CS pipes). 
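The new `gfx_v8_0_emit_mem_sync()` above emits a fixed 5-dword SURFACE_SYNC packet that flushes and invalidates the TC/KCACHE/ICACHE around a gfx submission. A standalone sketch of the same stream built into a plain array (PM4 type-3 header: type in bits 31:30, count in 29:16, opcode in 15:8; the opcode value is from the VI packet headers):

#include <stdint.h>

#define PACKET3(op, n) ((3u << 30) | (((n) & 0x3fffu) << 16) | (((op) & 0xffu) << 8))
#define PACKET3_SURFACE_SYNC 0x43 /* VI PM4 opcode */

/* Build the 5-dword stream gfx_v8_0_emit_mem_sync() writes to the ring;
 * coher_cntl carries the TCL1/TC/KCACHE/ICACHE action bits. */
static int build_surface_sync(uint32_t *out, uint32_t coher_cntl)
{
	int n = 0;

	out[n++] = PACKET3(PACKET3_SURFACE_SYNC, 3);
	out[n++] = coher_cntl; /* CP_COHER_CNTL: which caches to act on */
	out[n++] = 0xffffffff; /* CP_COHER_SIZE: whole address space */
	out[n++] = 0;          /* CP_COHER_BASE */
	out[n++] = 0x0000000A; /* poll interval */
	return n;              /* 5 dwords, matching the ring-size budget */
}

The compute variant uses ACQUIRE_MEM instead and adds the SIZE_HI/BASE_HI dwords, which is why the compute ring budget below reserves 7 dwords for it.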
+ */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v8_0_emit_wave_limit_cs(ring, i, enable); + + } + +} + static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6917,7 +6954,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 12 + 12 + /* FENCE x2 */ - 2, /* SWITCH_BUFFER */ + 2 + /* SWITCH_BUFFER */ + 5, /* SURFACE_SYNC */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, @@ -6935,6 +6973,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec, .emit_wreg = gfx_v8_0_ring_emit_wreg, .soft_recovery = gfx_v8_0_ring_soft_recovery, + .emit_mem_sync = gfx_v8_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { @@ -6951,7 +6990,10 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { 5 + /* hdp_invalidate */ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */ - 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ + 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ + 7 + /* gfx_v8_0_emit_mem_sync_compute */ + 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */ .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, @@ -6963,8 +7005,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v8_0_ring_set_priority_compute, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute, + .emit_wave_limit = gfx_v8_0_emit_wave_limit, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 97105a5bb246..7f944bb11298 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -38,7 +38,6 @@ #include "gc/gc_9_0_sh_mask.h" #include "vega10_enum.h" -#include "hdp/hdp_4_0_offset.h" #include "soc15_common.h" #include "clearstate_gfx9.h" @@ -48,18 +47,19 @@ #include "amdgpu_ras.h" +#include "gfx_v9_4.h" +#include "gfx_v9_0.h" +#include "gfx_v9_4_2.h" + +#include "asic_reg/pwr/pwr_10_0_offset.h" +#include "asic_reg/pwr/pwr_10_0_sh_mask.h" +#include "asic_reg/gc/gc_9_0_default.h" + #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L -#define mmPWR_MISC_CNTL_STATUS 0x0183 -#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 -#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L -#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L - #define mmGCEA_PROBE_MAP 0x070c #define mmGCEA_PROBE_MAP_BASE_IDX 0 @@ -108,16 +108,25 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin"); MODULE_FIRMWARE("amdgpu/arcturus_mec.bin"); 
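The `emit_wave_limit` hooks added above implement QoS for high-priority compute: while such a queue runs, SPI_WCL_PIPE_PERCENT_GFX drops from its 0x07ffffff default to 0x1f (roughly 25% of gfx waves per the comment), and every CS pipe other than the submitter's drops to 0x1; on disable, the defaults are restored. A condensed model of the per-pipe decision (register identifiers here are placeholders for the mmSPI_WCL_PIPE_PERCENT_* offsets):

#include <stdint.h>
#include <stdio.h>

#define WCL_GFX_DEFAULT 0x07ffffffu
#define WCL_CS_DEFAULT  0x0000007fu

enum { REG_WCL_GFX = 0, REG_WCL_CS0, REG_WCL_CS1, REG_WCL_CS2, REG_WCL_CS3 };

static void emit_wreg(int reg, uint32_t val)
{
	printf("wreg %d <- 0x%08x\n", reg, val);
}

static void emit_wave_limit(int my_pipe, int num_pipes, int enable)
{
	emit_wreg(REG_WCL_GFX, enable ? 0x1f : WCL_GFX_DEFAULT);

	/* throttle every CS pipe except the high-priority submitter */
	for (int i = 0; i < num_pipes && i < 4; i++)
		if (i != my_pipe)
			emit_wreg(REG_WCL_CS0 + i, enable ? 0x1 : WCL_CS_DEFAULT);
}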
-MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin"); MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin"); MODULE_FIRMWARE("amdgpu/renoir_ce.bin"); MODULE_FIRMWARE("amdgpu/renoir_pfp.bin"); MODULE_FIRMWARE("amdgpu/renoir_me.bin"); MODULE_FIRMWARE("amdgpu/renoir_mec.bin"); -MODULE_FIRMWARE("amdgpu/renoir_mec2.bin"); MODULE_FIRMWARE("amdgpu/renoir_rlc.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_me.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); + #define mmTCP_CHAN_STEER_0_ARCT 0x0b03 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0 #define mmTCP_CHAN_STEER_1_ARCT 0x0b04 @@ -131,18 +140,6 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 -struct ras_gfx_subblock_reg { - const char *name; - uint32_t hwip; - uint32_t inst; - uint32_t seg; - uint32_t reg_offset; - uint32_t sec_count_mask; - uint32_t sec_count_shift; - uint32_t ded_count_mask; - uint32_t ded_count_shift; -}; - enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -521,8 +518,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), @@ -705,6 +702,12 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000) +}; + +static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = { + {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)}, + {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)}, }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = @@ -731,6 +734,75 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; +static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +{ + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + static uint32_t grbm_cntl; + static uint32_t grbm_idx; + + scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; + scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; + scratch_reg2 = adev->rmmio + 
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; + + grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; + grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; + + if (amdgpu_sriov_runtime(adev)) { + pr_err("shouldn't call rlcg write register during runtime\n"); + return; + } + + if (offset == grbm_cntl || offset == grbm_idx) { + if (offset == grbm_cntl) + writel(v, scratch_reg2); + else if (offset == grbm_idx) + writel(v, scratch_reg3); + + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + } else { + uint32_t i = 0; + uint32_t retries = 50000; + + writel(v, scratch_reg0); + writel(offset | 0x80000000, scratch_reg1); + writel(1, spare_int); + for (i = 0; i < retries; i++) { + u32 tmp; + + tmp = readl(scratch_reg1); + if (!(tmp & 0x80000000)) + break; + + udelay(10); + } + if (i >= retries) + pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); + } + +} + +static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset, + u32 v, u32 acc_flags, u32 hwip) +{ + if ((acc_flags & AMDGPU_REGS_RLC) && + amdgpu_sriov_fullaccess(adev)) { + gfx_v9_0_rlcg_w(adev, offset, v, acc_flags); + + return; + } + + if (acc_flags & AMDGPU_REGS_NO_KIQ) + WREG32_NO_KIQ(offset, v); + else + WREG32(offset, v); +} + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -741,20 +813,148 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, - struct amdgpu_cu_info *cu_info); + struct amdgpu_cu_info *cu_info); static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); -static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); + +static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, + uint64_t queue_mask) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(kiq_ring, + PACKET3_SET_RESOURCES_VMID_MASK(0) | + /* vmid_mask:0* queue_type:0 (KIQ) */ + PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); + amdgpu_ring_write(kiq_ring, + lower_32_bits(queue_mask)); /* queue mask lo */ + amdgpu_ring_write(kiq_ring, + upper_32_bits(queue_mask)); /* queue mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* oac mask */ + amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ +} + +static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + uint64_t wptr_addr = adev->wb.gpu_addr + 
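The `gfx_v9_0_rlcg_w()` helper above is an indirect write path for RLC-protected registers under SR-IOV full access: the value goes into SCRATCH_REG0, `offset | 0x80000000` into SCRATCH_REG1, RLC_SPARE_INT is rung, and the driver polls up to 50000 times (10us apart) until the RLC firmware clears bit 31 to acknowledge; GRBM_GFX_CNTL/INDEX are special-cased through SCRATCH_REG2/3 plus a direct write. A user-space model of the handshake, with the `scratch` array and `rlc_ack()` standing in for MMIO and the RLC firmware:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t scratch[2]; /* SCRATCH_REG0/1 stand-ins */
static volatile uint32_t spare_int;

/* pretend firmware: consume the request and clear the busy bit */
static void rlc_ack(void) { scratch[1] &= ~0x80000000u; }

static int rlcg_write(uint32_t offset, uint32_t value)
{
	scratch[0] = value;                /* payload */
	scratch[1] = offset | 0x80000000u; /* request + busy flag */
	spare_int = 1;                     /* kick the RLC */

	for (int i = 0; i < 50000; i++) {
		rlc_ack();                 /* done by the RLC fw in reality */
		if (!(scratch[1] & 0x80000000u))
			return 0;          /* acknowledged */
		/* udelay(10) in the real driver */
	}
	fprintf(stderr, "rlcg write 0x%05x timed out\n", offset);
	return -1;
}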
(ring->wptr_offs * 4); + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(ring->queue) | + PACKET3_MAP_QUEUES_PIPE(ring->pipe) | + PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | + /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | + /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | + PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | + /* num_queues: must be 1 */ + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); +} + +static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + enum amdgpu_unmap_queues_action action, + u64 gpu_addr, u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(action) | + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + + if (action == PREEMPT_QUEUES_NO_UNMAP) { + amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, seq); + } else { + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } +} + +static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + u64 addr, + u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
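`gfx_v9_0_kiq_map_queues()` above hands a queue's MQD address and wptr writeback address to the KIQ in a single MAP_QUEUES request, with `eng_sel` 4 for gfx rings and 0 for compute. A sketch of the 7-dword packet built into an array; only the fields that are nonzero in the hunk above are encoded, with shifts as in soc15d.h, so treat the layout as illustrative:

#include <stdint.h>

#define PACKET3(op, n) ((3u << 30) | (((n) & 0x3fffu) << 16) | (((op) & 0xffu) << 8))
#define PACKET3_MAP_QUEUES 0xA7 /* PM4 opcode */

static int build_map_queues(uint32_t *out, uint32_t me, uint32_t pipe,
			    uint32_t queue, uint32_t eng_sel,
			    uint32_t doorbell_index,
			    uint64_t mqd_addr, uint64_t wptr_addr)
{
	int n = 0;

	out[n++] = PACKET3(PACKET3_MAP_QUEUES, 5);
	out[n++] = (queue << 13) | (pipe << 16) |
		   ((me == 1 ? 0u : 1u) << 18) | /* user MEC numbering */
		   (eng_sel << 26) | (1u << 29); /* num_queues = 1 */
	out[n++] = doorbell_index << 2;          /* DOORBELL_OFFSET field */
	out[n++] = (uint32_t)mqd_addr;
	out[n++] = (uint32_t)(mqd_addr >> 32);
	out[n++] = (uint32_t)wptr_addr;
	out[n++] = (uint32_t)(wptr_addr >> 32);
	return n; /* 7, matching .map_queues_size below */
}

Collecting these emitters into `kiq_pm4_funcs`, with per-packet dword sizes, lets the common amdgpu_gfx code size ring allocations and drive KIQ identically across gfx generations.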
4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_CONTEXT_ID(0) | + PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | + PACKET3_QUERY_STATUS_COMMAND(2)); + /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | + PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); + amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); + amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); +} + +static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(kiq_ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(1) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); +} + +static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { + .kiq_set_resources = gfx_v9_0_kiq_set_resources, + .kiq_map_queues = gfx_v9_0_kiq_map_queues, + .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues, + .kiq_query_status = gfx_v9_0_kiq_query_status, + .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs, + .set_resources_size = 8, + .map_queues_size = 7, + .unmap_queues_size = 6, + .query_status_size = 7, + .invalidate_tlbs_size = 2, +}; + +static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) +{ + adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs; +} static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_0, ARRAY_SIZE(golden_settings_gc_9_0)); @@ -762,7 +962,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg10, ARRAY_SIZE(golden_settings_gc_9_0_vg10)); break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_2_1, ARRAY_SIZE(golden_settings_gc_9_2_1)); @@ -770,7 +970,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_2_1_vg12, ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_0, ARRAY_SIZE(golden_settings_gc_9_0)); @@ -778,15 +978,16 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg20, ARRAY_SIZE(golden_settings_gc_9_0_vg20)); break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_4_1_arct, ARRAY_SIZE(golden_settings_gc_9_4_1_arct)); break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_1, ARRAY_SIZE(golden_settings_gc_9_1)); - if (adev->rev_id >= 8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) soc15_program_register_sequence(adev, golden_settings_gc_9_1_rv2, ARRAY_SIZE(golden_settings_gc_9_1_rv2)); @@ -795,16 +996,21 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_1_rv1, ARRAY_SIZE(golden_settings_gc_9_1_rv1)); break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_1_rn, ARRAY_SIZE(golden_settings_gc_9_1_rn)); return; /* for 
renoir, don't need common goldensetting */ + case IP_VERSION(9, 4, 2): + gfx_v9_4_2_init_golden_registers(adev, + adev->smuio.funcs->get_die_id(adev)); + break; default: break; } - if (adev->asic_type != CHIP_ARCTURUS) + if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && + (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))) soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } @@ -905,7 +1111,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) gpu_addr = adev->wb.gpu_addr + (index * 4); adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 16, &ib); + r = amdgpu_ib_get(adev, NULL, 16, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err1; @@ -987,15 +1194,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; - if ((adev->gfx.mec_fw_version < 0x000001a5) || + if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && + ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || - (adev->gfx.pfp_feature_version < 46)) - DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ - GRBM requires 1-cycle delay in cp firmware\n"); + (adev->gfx.pfp_feature_version < 46))) + DRM_WARN_ONCE("CP firmware version too old, please update!"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42) && (adev->gfx.pfp_fw_version >= 0x000000b1) && @@ -1006,7 +1213,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 42)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 44) && (adev->gfx.pfp_fw_version >= 0x000000b2) && @@ -1017,7 +1224,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 44)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 44) && (adev->gfx.pfp_fw_version >= 0x000000b2) && @@ -1028,7 +1235,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 44)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42) && (adev->gfx.pfp_fw_version >= 0x000000b1) && @@ -1040,22 +1248,83 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.mec_fw_write_wait = true; break; default: + adev->gfx.me_fw_write_wait = true; + adev->gfx.mec_fw_write_wait = true; break; } } +struct amdgpu_gfxoff_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */ + { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, + /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ + { 0x1002, 0x15dd, 0x1002, 
0x15dd, 0xc6 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev) +{ + const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + +static bool is_raven_kicker(struct amdgpu_device *adev) +{ + if (adev->pm.fw_version >= 0x41e2b) + return true; + else + return false; +} + +static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev) +{ + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) && + (adev->gfx.me_fw_version >= 0x000000a5) && + (adev->gfx.me_feature_version >= 52)) + return true; + else + return false; +} + static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: + if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): break; - case CHIP_RAVEN: - if (!(adev->rev_id >= 0x8 || - adev->pdev->device == 0x15d8) && - (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ - !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) || + (adev->apu_flags & AMD_APU_IS_PICASSO)) && + ((!is_raven_kicker(adev) && + adev->gfx.rlc_fw_version < 531) || + (adev->gfx.rlc_feature_version < 1) || + !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (adev->pm.pp_feature & PP_GFXOFF_MASK) @@ -1063,7 +1332,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): if (adev->pm.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | @@ -1285,6 +1554,16 @@ out: return err; } +static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) +{ + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) + return false; + + return true; +} + static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, const char *chip_name) { @@ -1306,21 +1585,26 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (!err) { - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); - if (err) - goto out; - cp_hdr = (const struct gfx_firmware_header_v1_0 *) - adev->gfx.mec2_fw->data; - adev->gfx.mec2_fw_version = - le32_to_cpu(cp_hdr->header.ucode_version); - adev->gfx.mec2_feature_version = - le32_to_cpu(cp_hdr->ucode_feature_version); + if (gfx_v9_0_load_mec2_fw_bin_support(adev)) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (!err) { + err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + 
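The quirk table above disables GFXOFF (clears PP_GFXOFF_MASK) on specific Raven boards identified by vendor/device, subsystem IDs, and revision; the zero entry terminates the list so new quirks can be appended without a count. A self-contained version of the matcher with a toy PCI identity struct:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct pci_id { uint16_t vendor, device, sub_vendor, sub_device; uint8_t rev; };

/* same shape as amdgpu_gfxoff_quirk_list: zero-terminated table */
static const struct pci_id quirks[] = {
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0 },
};

static bool should_disable_gfxoff(const struct pci_id *dev)
{
	for (const struct pci_id *p = quirks; p->device; p++)
		if (dev->vendor == p->vendor && dev->device == p->device &&
		    dev->sub_vendor == p->sub_vendor &&
		    dev->sub_device == p->sub_device && dev->rev == p->rev)
			return true;
	return false;
}

int main(void)
{
	struct pci_id raven = { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 };
	printf("disable gfxoff: %d\n", should_disable_gfxoff(&raven));
	return 0;
}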
adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = + le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = + le32_to_cpu(cp_hdr->ucode_feature_version); + } else { + err = 0; + adev->gfx.mec2_fw = NULL; + } } else { - err = 0; - adev->gfx.mec2_fw = NULL; + adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; + adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version; } if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { @@ -1349,8 +1633,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, /* TODO: Determine if MEC2 JT FW loading can be removed for all GFX V9 asic and above */ - if (adev->asic_type != CHIP_ARCTURUS && - adev->asic_type != CHIP_RENOIR) { + if (gfx_v9_0_load_mec2_fw_bin_support(adev)) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; info->fw = adev->gfx.mec2_fw; @@ -1383,36 +1666,43 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): chip_name = "vega10"; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): chip_name = "vega12"; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): chip_name = "vega20"; break; - case CHIP_RAVEN: - if (adev->rev_id >= 8) + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): chip_name = "arcturus"; break; - case CHIP_RENOIR: - chip_name = "renoir"; + case IP_VERSION(9, 3, 0): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + chip_name = "renoir"; + else + chip_name = "green_sardine"; + break; + case IP_VERSION(9, 4, 2): + chip_name = "aldebaran"; break; default: BUG(); } /* No CPG in Arcturus */ - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->gfx.num_gfx_rings) { r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); if (r) return r; @@ -1508,7 +1798,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) always_on_cu_num = 4; - else if (adev->asic_type == CHIP_VEGA12) + else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1)) always_on_cu_num = 8; else always_on_cu_num = 12; @@ -1647,7 +1937,10 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) { - return 5; + if (gfx_v9_0_load_mec2_fw_bin_support(adev)) + return 5; + else + return 4; } static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) @@ -1666,7 +1959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return r; } - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) { + if (adev->flags & AMD_IS_APU) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ r = amdgpu_gfx_rlc_init_cpt(adev); @@ -1674,17 +1967,22 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return r; } - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): gfx_v9_0_init_lbpw(adev); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): gfx_v9_4_init_lbpw(adev); break; default: break; } + /* init spm vmid with 0xf */ + if 
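On ASICs where `gfx_v9_0_load_mec2_fw_bin_support()` returns false, the code above never requests `*_mec2.bin` and instead mirrors MEC1's version and feature numbers into the MEC2 fields; where a separate image is expected, a failed `request_firmware()` is downgraded to a non-fatal NULL. A minimal model of that fallback shape (`try_load()` and `has_mec2_bin()` are stand-ins for the kernel helpers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fw { uint32_t version; };

static struct fw *try_load(const char *name) { (void)name; return NULL; }
static bool has_mec2_bin(void) { return false; }

static void init_mec2(const struct fw *mec, struct fw **mec2,
		      uint32_t *mec2_ver)
{
	if (has_mec2_bin()) {
		*mec2 = try_load("mec2.bin");
		if (*mec2)
			*mec2_ver = (*mec2)->version;
		/* a missing file is not fatal: leave *mec2 NULL */
	} else {
		*mec2 = NULL;             /* no separate image on this ASIC */
		*mec2_ver = mec->version; /* report MEC1's version for MEC2 */
	}
}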
(adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1710,29 +2008,30 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; + if (mec_hpd_size) { + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + gfx_v9_0_mec_fini(adev); + return r; + } - r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.mec.hpd_eop_obj, - &adev->gfx.mec.hpd_eop_gpu_addr, - (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); - amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + } mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; fw_data = (const __le32 *) (adev->gfx.mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, @@ -1755,7 +2054,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1767,7 +2066,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -1796,6 +2095,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -1824,14 +2124,20 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, } static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { - .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, - .select_se_sh = &gfx_v9_0_select_se_sh, - .read_wave_data = &gfx_v9_0_read_wave_data, - .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, - .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, - .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, + .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v9_0_select_se_sh, + .read_wave_data = &gfx_v9_0_read_wave_data, + .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, + .read_wave_vgprs = 
&gfx_v9_0_read_wave_vgprs, + .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, +}; + +static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = { + .ras_late_init = amdgpu_gfx_ras_late_init, + .ras_fini = amdgpu_gfx_ras_fini, .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -1841,8 +2147,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.funcs = &gfx_v9_0_gfx_funcs; - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1850,7 +2156,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1859,7 +2165,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; DRM_INFO("fix gfx.config for vega12\n"); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): + adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1873,18 +2180,20 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) if (err) return err; break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - if (adev->rev_id >= 8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN; else gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): + adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1894,7 +2203,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config &= ~0xf3e777ff; gb_addr_config |= 0x22014042; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1904,6 +2213,21 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config &= ~0xf3e777ff; gb_addr_config |= 0x22010042; break; + case IP_VERSION(9, 4, 2): + adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs; + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + gb_addr_config &= ~0xf3e777ff; + gb_addr_config |= 0x22014042; + /* check vbios table if gpu info is not available */ + err = 
amdgpu_atomfirmware_get_gfx_info(adev); + if (err) + return err; + break; default: BUG(); break; @@ -1952,9 +2276,9 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { - int r; unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; + unsigned int hw_prio; ring = &adev->gfx.compute_ring[ring_id]; @@ -1973,15 +2297,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ - r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, irq_type); - if (r) - return r; - - - return 0; + return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + hw_prio, NULL); } static int gfx_v9_0_sw_init(void *handle) @@ -1991,13 +2311,15 @@ static int gfx_v9_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): adev->gfx.mec.num_mec = 2; break; default: @@ -2069,8 +2391,9 @@ static int gfx_v9_0_sw_init(void *handle) sprintf(ring->name, "gfx_%d", i); ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; - r = amdgpu_ring_init(adev, ring, 1024, - &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, + AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -2125,7 +2448,9 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_gfx_ras_fini(adev); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->ras_fini) + adev->gfx.ras_funcs->ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -2138,7 +2463,7 @@ static int gfx_v9_0_sw_fini(void *handle) gfx_v9_0_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) { + if (adev->flags & AMD_IS_APU) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -2154,7 +2479,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) /* TODO */ } -static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) +void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, + u32 instance) { u32 data; @@ -2217,8 +2543,6 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) } #define DEFAULT_SH_MEM_BASES (0x6000) -#define FIRST_COMPUTE_VMID (8) -#define LAST_COMPUTE_VMID (16) static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) { int i; @@ -2238,7 +2562,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) 
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; mutex_lock(&adev->srbm_mutex); - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { soc15_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); @@ -2249,7 +2573,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) /* Initialize all compute VMIDs to have no GDS, GWS, or OA acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); @@ -2267,7 +2591,7 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev) * the driver can enable them for graphics. VMID0 should maintain * access so that HWS firmware can save/restore entries. */ - for (vmid = 1; vmid < 16; vmid++) { + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0); @@ -2275,6 +2599,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev) } } +static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) +{ + uint32_t tmp; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 1): + tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); + tmp = REG_SET_FIELD(tmp, SQ_CONFIG, + DISABLE_BARRIER_WAITCNT, 1); + WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp); + break; + default: + break; + } +} + static void gfx_v9_0_constants_init(struct amdgpu_device *adev) { u32 tmp; @@ -2298,14 +2638,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!amdgpu_noretry); + !!adev->gmc.noretry); WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp); WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0); } else { tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, - !!amdgpu_noretry); + !!adev->gmc.noretry); WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp); tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, (adev->gmc.private_aperture_start >> 48)); @@ -2320,6 +2660,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) gfx_v9_0_init_compute_vmid(adev); gfx_v9_0_init_gds_vmid(adev); + gfx_v9_0_init_sq_config(adev); } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) @@ -2363,12 +2704,17 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, bool enable) { - u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); + u32 tmp; + + /* These interrupts should be enabled to drive DS clock */ + + tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); - tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 
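The compute-VMID hunks above drop the hard-coded FIRST_COMPUTE_VMID(8)..LAST_COMPUTE_VMID(16) window in favor of `adev->vm_manager.first_kfd_vmid`, making the graphics/KFD VMID split a runtime property rather than a constant. The loop shape, in a trivially runnable form:

#include <stdio.h>

#define NUM_VMID 16 /* AMDGPU_NUM_VMID on these parts */

static void init_compute_vmids(int first_kfd_vmid)
{
	for (int i = first_kfd_vmid; i < NUM_VMID; i++) {
		/* program SH_MEM_CONFIG/SH_MEM_BASES for VMID i and zero
		 * its GDS/GWS/OA allocations, as in the hunk above */
		printf("init compute vmid %d\n", i);
	}
}

int main(void) { init_compute_vmids(8); return 0; }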
1 : 0); + if(adev->gfx.num_gfx_rings) + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); } @@ -2537,7 +2883,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev, uint32_t default_data = 0; default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS)); - if (enable == true) { + if (enable) { /* enable GFXIP control over CGPG */ data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; if(default_data != data) @@ -2593,8 +2939,8 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); - - pwr_10_0_gfxip_control_over_cgpg(adev, true); + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0)) + pwr_10_0_gfxip_control_over_cgpg(adev, true); } } @@ -2705,9 +3051,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. */ if (adev->gfx.rlc.is_rlc_v2_1) { - if (adev->asic_type == CHIP_VEGA12 || - (adev->asic_type == CHIP_RAVEN && - adev->rev_id >= 8)) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -2724,7 +3069,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) } } -void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) { WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_0_enable_gui_idle_interrupt(adev, false); @@ -2819,14 +3164,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): if (amdgpu_lbpw == 0) gfx_v9_0_enable_lbpw(adev, false); else gfx_v9_0_enable_lbpw(adev, true); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): if (amdgpu_lbpw > 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -2843,16 +3189,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { - int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (!enable) { - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - adev->gfx.gfx_ring[i].sched.ready = false; - } WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); udelay(50); } @@ -3048,15 +3389,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { - int i; - if (enable) { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); - for (i = 0; i < adev->gfx.num_compute_rings; i++) - adev->gfx.compute_ring[i].sched.ready = false; adev->gfx.kiq.ring.sched.ready = false; } udelay(50); @@ -3119,72 +3456,17 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp); } -static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) +static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint64_t queue_mask = 0; - int r, i; - - for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) - continue; + struct amdgpu_device *adev = ring->adev; - /* This situation may be hit in the future if a new HW - * generation exposes more than 64 queues. If so, the - * definition of queue_mask needs updating */ - if (WARN_ON(i >= (sizeof(queue_mask)*8))) { - DRM_ERROR("Invalid KCQ enabled: %d\n", i); - break; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; } - - queue_mask |= (1ull << i); } - - r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - return r; - } - - /* set resources */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); - amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | - PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ - amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ - amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ - amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ - amdgpu_ring_write(kiq_ring, 0); /* oac mask */ - amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); - uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); - /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ - PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ - PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ - PACKET3_MAP_QUEUES_QUEUE(ring->queue) | - PACKET3_MAP_QUEUES_PIPE(ring->pipe) | - PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 
0 : 1)) | - PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ - PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ - PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ - PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ - amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); - amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); - amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); - amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); - } - - r = amdgpu_ring_test_helper(kiq_ring); - if (r) - DRM_ERROR("KCQ enable failed\n"); - - return r; } static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) @@ -3323,8 +3605,15 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; - /* activate the queue */ - mqd->cp_hqd_active = 1; + /* set static priority for a queue/ring */ + gfx_v9_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM); + + /* map_queues packet doesn't need activate the queue, + * so only kiq need set this field. + */ + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + mqd->cp_hqd_active = 1; return 0; } @@ -3405,7 +3694,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) if (ring->use_doorbell) { WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, + /* If GC has entered CGPG, ringing doorbell > first page + * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to + * workaround this issue. And this change has to align with firmware + * update. + */ + if (check_if_enlarge_doorbell_range(adev)) + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, + (adev->doorbell.size - 4)); + else + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, (adev->doorbell_index.userqueue_end * 2) << 2); } @@ -3478,11 +3776,18 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; + struct v9_mqd *tmp_mqd; gfx_v9_0_kiq_setting(ring); - if (adev->in_gpu_reset) { /* for GPU_RESET case */ - /* reset MQD to a clean status */ + /* GPU could be in bad state during probe, driver trigger the reset + * after load the SMU, in this case , the mqd is not be initialized. + * driver need to re-init the mqd. 
@@ -3607,7 +3920,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) gfx_v9_0_enable_gui_idle_interrupt(adev, false); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->gfx.num_gfx_rings) { /* legacy firmware loading */ r = gfx_v9_0_cp_gfx_load_microcode(adev); if (r) @@ -3623,7 +3936,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) if (r) return r; - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->gfx.num_gfx_rings) { r = gfx_v9_0_cp_gfx_resume(adev); if (r) return r; @@ -3633,7 +3946,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) if (r) return r; - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->gfx.num_gfx_rings) { ring = &adev->gfx.gfx_ring[0]; r = amdgpu_ring_test_helper(ring); if (r) @@ -3650,9 +3963,27 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) && + adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)) + return; + + tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH, + adev->df.hash_status.hash_64k); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH, + adev->df.hash_status.hash_2m); + tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH, + adev->df.hash_status.hash_1g); + WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp); +} + static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) { - if (adev->asic_type != CHIP_ARCTURUS) + if (adev->gfx.num_gfx_rings) gfx_v9_0_cp_gfx_enable(adev, enable); gfx_v9_0_cp_compute_enable(adev, enable); }
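The new gfx_v9_0_init_tcp_config() above is a plain read-modify-write: read TCP_ADDR_CONFIG, update the three hash-enable fields from the DF hash status, write it back. REG_SET_FIELD() itself is only mask-and-shift token pasting. A self-contained model of the same shape, using a made-up one-bit field layout rather than the real TCP_ADDR_CONFIG encoding, is:

    #include <stdint.h>
    #include <stdio.h>

    /* made-up layout standing in for the generated ENABLE64KHASH field */
    #define TCP_ADDR_CONFIG__ENABLE64KHASH__SHIFT 4
    #define TCP_ADDR_CONFIG__ENABLE64KHASH_MASK   (0x1u << 4)

    /* same shape as the kernel macro: clear the field, then OR in the
     * new value shifted into place and clamped by the field mask */
    #define REG_SET_FIELD(orig, reg, field, val)                          \
            (((orig) & ~reg##__##field##_MASK) |                          \
             (((uint32_t)(val) << reg##__##field##__SHIFT) &              \
              reg##__##field##_MASK))

    int main(void)
    {
            uint32_t tmp = 0xdeadbee0; /* pretend RREG32 result */

            tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH, 1);
            /* prints 0xdeadbef0: bit 4 set, every other bit untouched */
            printf("0x%08x\n", (unsigned)tmp);
            return 0;
    }

Also visible in this hunk is the recurring refactor of this commit: the chip-enumeration test (asic_type != CHIP_ARCTURUS) becomes a capability test (adev->gfx.num_gfx_rings), so new gfx-ring-less parts work without another chip list edit.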
@@ -3667,6 +3998,8 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_constants_init(adev); + gfx_v9_0_init_tcp_config(adev); + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3675,35 +4008,8 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - return r; -} - -static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) -{ - int r, i; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - - r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); - if (r) - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ - PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ - PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | - PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | - PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); - amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - } - r = amdgpu_ring_test_helper(kiq_ring); - if (r) - DRM_ERROR("KCQ disable failed\n"); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + gfx_v9_4_2_set_power_brake_sequence(adev); return r; } @@ -3719,7 +4025,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* DF freeze and kcq disable will fail */ if (!amdgpu_ras_intr_triggered()) /* disable KCQ to avoid CPC touching memory that is no longer valid */ - gfx_v9_0_kcq_disable(adev); + amdgpu_gfx_disable_kcq(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); @@ -3735,7 +4041,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* Use the deinitialize sequence from CAIL when unbinding the device from * the driver, otherwise KIQ hangs when binding back */ - if (!adev->in_gpu_reset && !adev->in_suspend) { + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, adev->gfx.kiq.ring.me, adev->gfx.kiq.ring.pipe, @@ -3746,8 +4052,14 @@ static int gfx_v9_0_hw_fini(void *handle) } gfx_v9_0_cp_enable(adev, false); - adev->gfx.rlc.funcs->stop(adev); + /* Skip suspend with A+A reset */ + if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { + dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); + return 0; + } + + adev->gfx.rlc.funcs->stop(adev); return 0; } @@ -3821,7 +4133,7 @@ static int gfx_v9_0_soft_reset(void *handle) /* stop the rlc */ adev->gfx.rlc.funcs->stop(adev); - if (adev->asic_type != CHIP_ARCTURUS) + if (adev->gfx.num_gfx_rings) /* Disable GFX parsing/prefetching */ gfx_v9_0_cp_gfx_enable(adev, false); @@ -3848,28 +4160,95 @@ static int gfx_v9_0_soft_reset(void *handle) return 0; } +static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq, reg_val_offs = 0; + uint64_t value = 0; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + if (amdgpu_device_wb_get(adev, &reg_val_offs)) { + pr_err("critical bug!
too many kiq readers\n"); + goto failed_unlock; + } + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 9 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 16) | /* count sel */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev))) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + mb(); + value = (uint64_t)adev->wb.wb[reg_val_offs] | + (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL; + amdgpu_device_wb_free(adev, reg_val_offs); + return value; + +failed_undo: + amdgpu_ring_undo(ring); +failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_read: + if (reg_val_offs) + amdgpu_device_wb_free(adev, reg_val_offs); + pr_err("failed to read gpu clock\n"); + return ~0; +} + static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; + amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { - uint32_t tmp, lsb, msb, i = 0; - do { - if (i != 0) - udelay(1); - tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); - msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - i++; - } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); - clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); } else { WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); } mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); return clock; } @@ -3936,33 +4315,173 @@ static const u32 sgpr_init_compute_shader[] = 0xbe800080, 0xbf810000, }; +static const u32 vgpr_init_compute_shader_arcturus[] = { + 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, + 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, + 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, + 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, + 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, + 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, + 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, + 0xd3d94015, 0x18000080, 0xd3d94016, 
0x18000080, 0xd3d94017, 0x18000080, + 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, + 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, + 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, + 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, + 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, + 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, + 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, + 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, + 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, + 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, + 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, + 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, + 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, + 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, + 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, + 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, + 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, + 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, + 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080, + 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, + 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, + 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, + 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, + 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, + 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, + 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, + 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, + 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, + 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, + 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, + 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, + 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, + 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, + 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, + 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, + 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, + 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, + 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, + 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, + 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, + 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, + 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, + 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, + 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, + 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, + 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, + 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, + 0xd3d940a5, 0x18000080, 0xd3d940a6, 
0x18000080, 0xd3d940a7, 0x18000080, + 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, + 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, + 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, + 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, + 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, + 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, + 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, + 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, + 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, + 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, + 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, + 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, + 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, + 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, + 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, + 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, + 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, + 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, + 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080, + 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, + 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, + 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, + 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, + 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, + 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, + 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, + 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, + 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, + 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, + 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, + 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, + 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, + 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904, + 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, + 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, + 0xbf84fff8, 0xbf810000, +}; + +/* When below register arrays changed, please update gpr_reg_size, + and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds, + to cover all gfx9 ASICs */ static const struct soc15_reg_entry vgpr_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 
0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, }; -static const struct soc15_reg_entry sgpr_init_regs[] = { +static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +static const struct soc15_reg_entry sgpr1_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff }, }; -static const struct soc15_reg_entry sec_ded_counter_registers[] = { +static const struct soc15_reg_entry sgpr2_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, + { 
SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 }, +}; + +static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1}, @@ -4050,10 +4569,19 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; struct amdgpu_ib ib; struct dma_fence *f = NULL; - int r, i, j, k; + int r, i; unsigned total_size, vgpr_offset, sgpr_offset; u64 gpu_addr; + int compute_dim_x = adev->gfx.config.max_shader_engines * + adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se; + int sgpr_work_group_size = 5; + int gpr_reg_size = adev->gfx.config.max_shader_engines + 6; + int vgpr_init_shader_size; + const u32 *vgpr_init_shader_ptr; + const struct soc15_reg_entry *vgpr_init_regs_ptr; + /* only support when RAS is enabled */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return 0; @@ -4062,27 +4590,40 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) return 0; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { + vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); + vgpr_init_regs_ptr = vgpr_init_regs_arcturus; + } else { + vgpr_init_shader_ptr = vgpr_init_compute_shader; + vgpr_init_shader_size = sizeof(vgpr_init_compute_shader); + vgpr_init_regs_ptr = vgpr_init_regs; + } + total_size = - ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4; + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */ total_size += - ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4; + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */ + total_size += + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */ total_size = ALIGN(total_size, 256); vgpr_offset = total_size; - total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); + total_size += ALIGN(vgpr_init_shader_size, 256); sgpr_offset = total_size; total_size += sizeof(sgpr_init_compute_shader); /* allocate an indirect buffer to put the commands in */ memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, total_size, &ib); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); return r; } /* load the compute shaders */ - for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++) - ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i]; + for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++) + ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i]; for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++) ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i]; @@ -4092,11 
+4633,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* VGPR */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) { + for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8; @@ -4108,7 +4649,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* SGPR1 */ + /* write the register state for the compute dispatch */ + for (i = 0; i < gpr_reg_size; i++) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i]) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4118,13 +4687,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); - /* SGPR */ + /* SGPR2 */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) { + for (i = 0; i < gpr_reg_size; i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; @@ -4136,7 +4705,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] 
= @@ -4160,19 +4729,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - /* read back registers to clear the counters */ - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { - gfx_v9_0_select_se_sh(adev, j, 0x0, k); - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); - } - } - } - WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); - mutex_unlock(&adev->grbm_idx_mutex); - fail: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); @@ -4184,11 +4740,14 @@ static int gfx_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); + gfx_v9_0_set_kiq_pm4_funcs(adev); gfx_v9_0_set_ring_funcs(adev); gfx_v9_0_set_irq_funcs(adev); gfx_v9_0_set_gds_init(adev); @@ -4202,19 +4761,39 @@ static int gfx_v9_0_ecc_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - r = amdgpu_gfx_ras_late_init(adev); - if (r) - return r; - - r = gfx_v9_0_do_edc_gds_workarounds(adev); - if (r) - return r; + /* + * Temporary workaround for an issue where the CP firmware fails to + * update the read pointer when CPDMA writes a clearing operation + * to GDS in the suspend/resume sequence on several cards. So just + * limit this operation to the cold boot sequence.
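+ * For example: on a cold boot adev->in_suspend is false, so the GDS + * clear below runs, while it is skipped whenever the device comes + * back through suspend/resume.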
+ */ + if ((!adev->in_suspend) && + (adev->gds.gds_size)) { + r = gfx_v9_0_do_edc_gds_workarounds(adev); + if (r) + return r; + } /* requires IBs so do in late init after IB pool is initialized */ - r = gfx_v9_0_do_edc_gpr_workarounds(adev); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + r = gfx_v9_4_2_do_edc_gpr_workarounds(adev); + else + r = gfx_v9_0_do_edc_gpr_workarounds(adev); + if (r) return r; + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->ras_late_init) { + r = adev->gfx.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->enable_watchdog_timer) + adev->gfx.ras_funcs->enable_watchdog_timer(adev); + return 0; } @@ -4324,7 +4903,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - RLC_CGTT_MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->asic_type != CHIP_VEGA12) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -4358,7 +4937,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->asic_type != CHIP_VEGA12) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | @@ -4392,13 +4971,13 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, { uint32_t data, def; - if (adev->asic_type == CHIP_ARCTURUS) + if (!adev->gfx.num_gfx_rings) return; amdgpu_gfx_rlc_enter_safe_mode(adev); /* Enable 3D CGCG/CGLS */ - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { + if (enable) { /* write cmd to clear cgcg/cgls ov */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); /* unset CGCG override */ @@ -4410,8 +4989,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, /* enable 3Dcgcg FSM(0x0000363f) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); - data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | - RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) + data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + else + data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; @@ -4460,7 +5043,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev /* enable cgcg FSM(0x0000363F) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; else @@ -4515,6 +5098,54 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 reg, data; + + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32(reg); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & 
RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); + else + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + +static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, + uint32_t offset, + struct soc15_reg_rlcg *entries, int arr_size) +{ + int i; + uint32_t reg; + + if (!entries) + return false; + + for (i = 0; i < arr_size; i++) { + const struct soc15_reg_rlcg *entry; + + entry = &entries[i]; + reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; + if (offset == reg) + return true; + } + + return false; +} + +static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) +{ + return gfx_v9_0_check_rlcg_range(adev, offset, + (void *)rlcg_access_gc_9_0, + ARRAY_SIZE(rlcg_access_gc_9_0)); +} + static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, .set_safe_mode = gfx_v9_0_set_safe_mode, @@ -4526,22 +5157,25 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, - .start = gfx_v9_0_rlc_start + .start = gfx_v9_0_rlc_start, + .update_spm_vmid = gfx_v9_0_update_spm_vmid, + .sriov_wreg = gfx_v9_0_sriov_wreg, + .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range, }; static int gfx_v9_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: - if (!enable) { + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 3, 0): + if (!enable) amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); @@ -4564,13 +5198,8 @@ static int gfx_v9_0_set_powergating_state(void *handle, if (enable) amdgpu_gfx_off_ctrl(adev, true); break; - case CHIP_VEGA12: - if (!enable) { - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - } else { - amdgpu_gfx_off_ctrl(adev, true); - } + case IP_VERSION(9, 2, 1): + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -4587,15 +5216,17 @@ static int gfx_v9_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): gfx_v9_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4612,12 +5243,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags = 0; /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) *flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -4626,18 +5257,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; @@ -4674,7 +5305,7 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) if (ring->use_doorbell) { /* XXX check if swapping is necessary on BE */ - atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); WDOORBELL64(ring->doorbell_index, ring->wptr); } else { WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); @@ -4729,7 +5360,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v9_0_ring_emit_de_meta(ring); } @@ -4854,112 +5485,13 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) return wptr; } -static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - soc15_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - -static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v9_0_hqd_set_priority(adev, ring, acquire); - gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; /* XXX check if swapping is necessary on BE */ if (ring->use_doorbell) { - atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); WDOORBELL64(ring->doorbell_index, ring->wptr); } else{ BUG(); /* only DOORBELL method supported on gfx9 now */ @@ -5040,10 +5572,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); } -static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) +static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, + bool secure) { + uint32_t v = secure ? FRAME_TMZ : 0; + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); - amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ + amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); } static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) @@ -5053,8 +5588,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) if (amdgpu_sriov_vf(ring->adev)) gfx_v9_0_ring_emit_ce_meta(ring); - gfx_v9_0_ring_emit_tmz(ring, true); - dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ if (flags & AMDGPU_HAVE_CTX_SWITCH) { /* set load_global_config & load_global_uconfig */ @@ -5105,7 +5638,8 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne ring->ring[offset] = (ring->ring_size>>2) - offset + cur; } -static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) { struct amdgpu_device *adev = ring->adev; @@ -5116,9 +5650,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + reg_val_offs * 4)); } static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -5277,6 +5811,7 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); + break; default: break; } @@ -5440,7 +5975,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } -static const struct ras_gfx_subblock_reg ras_subblock_regs[] = { +static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) @@ -5888,7 +6423,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, int ret; struct ta_ras_trigger_error_input block_info = { 0 }; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks)) @@ -6013,73 +6548,76 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); - for (i = 0; i < 16; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - vml2_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, vml2_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - vml2_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, vml2_mems[i], ded_count); err_data->ue_count += ded_count; } } - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, SEC_COUNT); if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - vml2_walker_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, vml2_walker_mems[i], sec_count); err_data->ce_count += sec_count; } ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, DED_COUNT); if (ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - vml2_walker_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, vml2_walker_mems[i], ded_count); err_data->ue_count += ded_count; } } - for (i = 0; i < 4; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); sec_count = (data & 0x00006000L) >> 0xd; if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - atc_l2_cache_2m_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, atc_l2_cache_2m_mems[i], + sec_count); err_data->ce_count += sec_count; } } - for (i = 0; i < 32; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); sec_count = (data & 0x00006000L) >> 0xd; if (sec_count) { - DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, - atc_l2_cache_4k_mems[i], sec_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "SEC %d\n", i, atc_l2_cache_4k_mems[i], + sec_count); err_data->ce_count += sec_count; } ded_count = (data & 0x00018000L) >> 0xf; if 
(ded_count) { - DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, - atc_l2_cache_4k_mems[i], ded_count); + dev_info(adev->dev, "Instance[%d]: SubBlock %s, " + "DED %d\n", i, atc_l2_cache_4k_mems[i], + ded_count); err_data->ue_count += ded_count; } } @@ -6092,36 +6630,39 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int __get_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { uint32_t i; uint32_t sec_cnt, ded_cnt; - for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) { - if(ras_subblock_regs[i].reg_offset != reg->reg_offset || - ras_subblock_regs[i].seg != reg->seg || - ras_subblock_regs[i].inst != reg->inst) + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) { + if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_0_ras_fields[i].seg != reg->seg || + gfx_v9_0_ras_fields[i].inst != reg->inst) continue; sec_cnt = (value & - ras_subblock_regs[i].sec_count_mask) >> - ras_subblock_regs[i].sec_count_shift; + gfx_v9_0_ras_fields[i].sec_count_mask) >> + gfx_v9_0_ras_fields[i].sec_count_shift; if (sec_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", - ras_subblock_regs[i].name, + dev_info(adev->dev, "GFX SubBlock %s, " + "Instance[%d][%d], SEC %d\n", + gfx_v9_0_ras_fields[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; } ded_cnt = (value & - ras_subblock_regs[i].ded_count_mask) >> - ras_subblock_regs[i].ded_count_shift; + gfx_v9_0_ras_fields[i].ded_count_mask) >> + gfx_v9_0_ras_fields[i].ded_count_shift; if (ded_cnt) { - DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", - ras_subblock_regs[i].name, + dev_info(adev->dev, "GFX SubBlock %s, " + "Instance[%d][%d], DED %d\n", + gfx_v9_0_ras_fields[i].name, se_id, inst_id, ded_cnt); *ded_count += ded_cnt; @@ -6131,6 +6672,61 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg, return 0; } +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i, j, k; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + + /* read back registers to clear the counters */ + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, 
mmATC_L2_CACHE_2M_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { @@ -6139,7 +6735,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, uint32_t i, j, k; uint32_t reg_value; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; err_data->ue_count = 0; @@ -6147,16 +6743,17 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { gfx_v9_0_select_se_sh(adev, j, 0, k); reg_value = - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) - __get_ras_error_count(&sec_ded_counter_registers[i], - j, k, reg_value, - &sec_count, &ded_count); + gfx_v9_0_ras_error_count(adev, + &gfx_v9_0_edc_counter_regs[i], + j, k, reg_value, + &sec_count, &ded_count); } } } @@ -6172,6 +6769,84 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring) +{ + const unsigned int cp_coher_cntl = + PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | + PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | + PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | + PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | + PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); + + /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */ + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); + amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ + amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ +} + +static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + uint32_t wcl_cs_reg; + + /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ + val = enable ? 
0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0); + break; + case 1: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1); + break; + case 2: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2); + break; + case 3: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); + +} +static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + + /* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit + * number of gfx waves. Setting 5 bit will make sure gfx only gets + * around 25% of gpu resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX), + val); + + /* Restrict waves for normal/low priority compute queues as well + * to get best QoS for high priority compute jobs. + * + * amdgpu controls only 1st ME(0-3 CS pipes). + */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v9_0_emit_wave_limit_cs(ring, i, enable); + + } +} + static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -6218,7 +6893,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ - 2, /* SWITCH_BUFFER */ + 2 + /* SWITCH_BUFFER */ + 7, /* gfx_v9_0_emit_mem_sync */ .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ .emit_ib = gfx_v9_0_ring_emit_ib_gfx, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -6234,11 +6910,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, - .emit_tmz = gfx_v9_0_ring_emit_tmz, + .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, + .emit_mem_sync = gfx_v9_0_emit_mem_sync, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { @@ -6258,7 +6935,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v9_0_ring_emit_vm_flush */ - 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ + 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ + 7 + /* gfx_v9_0_emit_mem_sync */ + 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -6270,10 +6950,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v9_0_ring_set_priority_compute, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .emit_mem_sync = 
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
 	.name = "gfx_v9_0",
 	.early_init = gfx_v9_0_early_init,
@@ -6218,7 +6893,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
 		8 + 8 + /* FENCE x2 */
-		2, /* SWITCH_BUFFER */
+		2 + /* SWITCH_BUFFER */
+		7, /* gfx_v9_0_emit_mem_sync */
 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6234,11 +6910,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
 	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
-	.emit_tmz = gfx_v9_0_ring_emit_tmz,
+	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6258,7 +6935,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
 		2 + /* gfx_v9_0_ring_emit_vm_flush */
-		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+		7 + /* gfx_v9_0_emit_mem_sync */
+		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6270,10 +6950,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.set_priority = gfx_v9_0_ring_set_priority_compute,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6356,13 +7037,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 
 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 3, 0):
+	case IP_VERSION(9, 4, 2):
 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
 		break;
 	default:
@@ -6373,38 +7056,50 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
 {
 	/* init asic gds info */
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
 		adev->gds.gds_size = 0x10000;
 		break;
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
 		adev->gds.gds_size = 0x1000;
 		break;
+	case IP_VERSION(9, 4, 2):
+		/* Aldebaran removed all the GDS internal memory;
+		 * only GWS opcodes such as barrier and semaphore
+		 * are supported in the kernel. */
+		adev->gds.gds_size = 0;
+		break;
 	default:
 		adev->gds.gds_size = 0x10000;
 		break;
 	}
 
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA20:
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 4, 0):
 		adev->gds.gds_compute_max_wave_id = 0x7ff;
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		adev->gds.gds_compute_max_wave_id = 0x27f;
 		break;
-	case CHIP_RAVEN:
-		if (adev->rev_id >= 0x8)
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
 		else
 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
 		break;
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 4, 1):
 		adev->gds.gds_compute_max_wave_id = 0xfff;
 		break;
+	case IP_VERSION(9, 4, 2):
+		/* deprecated for Aldebaran, no usage at all */
+		adev->gds.gds_compute_max_wave_id = 0;
+		break;
 	default:
 		/* this really depends on the chip */
 		adev->gds.gds_compute_max_wave_id = 0x7ff;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index fa5a3fbaf6ab..dfe8d4841f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -26,9 +26,7 @@
 
 extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
 
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
-uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+			   u32 instance);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
new file mode 100644
index 000000000000..b4789dfc2bb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/kernel.h> + +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "soc15.h" +#include "soc15d.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_pm.h" + +#include "gc/gc_9_4_1_offset.h" +#include "gc/gc_9_4_1_sh_mask.h" +#include "soc15_common.h" + +#include "gfx_v9_4.h" +#include "amdgpu_ras.h" + +static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = { + /* CPC */ + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 }, + /* DC */ + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 }, + /* CPF */ + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 }, + /* GDS */ + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 }, + /* SPI */ + { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 }, + /* SQ */ + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 }, + /* SQC */ + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 }, + /* TA */ + { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 }, + /* TCA */ + { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 }, + /* TCC */ + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 }, + /* TCI */ + { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 }, + /* TCP */ + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 }, + /* TD */ + { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 }, + /* GCEA */ + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 
32 }, + /* RLC */ + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 }, +}; + +static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); +} + +static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = { + /* CPC */ + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, + { "CPC_DC_RESTORE_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, + { "CPC_DC_RESTORE_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, + + /* CPF */ + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, + + /* GDS */ + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + 
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + + /* SPI */ + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, + { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) }, + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, + + /* SQ */ + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + + /* SQC */ + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, 
mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, 
DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + + /* TA */ + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, + + /* TCA */ + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, + + /* TCC */ + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 
0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, + + /* TCI */ + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, + + /* TCP */ + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + + /* TD */ + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, + + /* EA */ + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + 
{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, + { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), + 
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, + + /* RLC */ + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, + { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, + { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) 
}, + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, +}; + +static const char * const vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + "UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", + "UTC_VML2_IFIFO_GROUP0", + "UTC_VML2_IFIFO_GROUP1", + "UTC_VML2_IFIFO_GROUP2", + "UTC_VML2_IFIFO_GROUP3", + "UTC_VML2_IFIFO_GROUP4", + "UTC_VML2_IFIFO_GROUP5", + "UTC_VML2_IFIFO_GROUP6", + "UTC_VML2_IFIFO_GROUP7", + "UTC_VML2_IFIFO_GROUP8", + "UTC_VML2_IFIFO_GROUP9", + "UTC_VML2_IFIFO_GROUP10", + "UTC_VML2_IFIFO_GROUP11", + "UTC_VML2_IFIFO_GROUP12", + "UTC_VML2_IFIFO_GROUP13", + "UTC_VML2_IFIFO_GROUP14", + "UTC_VML2_IFIFO_GROUP15", + "UTC_VML2_IFIFO_GROUP16", + "UTC_VML2_IFIFO_GROUP17", + "UTC_VML2_IFIFO_GROUP18", + "UTC_VML2_IFIFO_GROUP19", + "UTC_VML2_IFIFO_GROUP20", + "UTC_VML2_IFIFO_GROUP21", + "UTC_VML2_IFIFO_GROUP22", + "UTC_VML2_IFIFO_GROUP23", + "UTC_VML2_IFIFO_GROUP24", +}; + +static const char * const vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_ARADDRS", + "UTC_VML2_RDIF_LOG_FIFO", + "UTC_VML2_QUEUE_REQ", + "UTC_VML2_QUEUE_RET", +}; + +static const char * const utcl2_router_mems[] = { + "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0", + "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1", + "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2", + "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3", + "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4", + "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5", + "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6", + "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7", + "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8", + "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9", + "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10", + "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11", + "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12", + "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13", + "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14", + "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15", + "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16", + "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17", + "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18", + "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19", + "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20", + "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21", + "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22", + "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23", + "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24", +}; + +static const char * const atc_l2_cache_2m_mems[] = { + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char * const atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + 
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + SEC_COUNT); + if (sec_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + DED_COUNT); + if (ded_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, + utcl2_router_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, 
UTCL2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, + utcl2_router_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + DED_COUNT); + if (ded_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_2m_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + DED_COUNT); + if (ded_count) { + dev_info(adev->dev, + "Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + + return 0; +} + +static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, + uint32_t value, uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) { + if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_4_ras_fields[i].seg != reg->seg || + gfx_v9_4_ras_fields[i].inst != reg->inst) + continue; + + sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> + gfx_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> + gfx_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0, k); + reg_value = 
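gfx_v9_4_ras_error_count() above matches the register it was handed against every entry of gfx_v9_4_ras_fields (by reg_offset, seg and inst) and then pulls the SEC (single-error-corrected) and DED (double-error-detected) counts out of the raw value with a mask and shift. A standalone sketch of that extraction, with hypothetical mask/shift values rather than real SOC15_REG_FIELD() encodings:

/* Standalone sketch of the mask/shift extraction used by
 * gfx_v9_4_ras_error_count(). The field layout here is hypothetical:
 * pretend SEC lives in bits [7:0] and DED in bits [15:8]. */
#include <assert.h>
#include <stdint.h>

struct ras_field {
	uint32_t sec_count_mask, sec_count_shift;
	uint32_t ded_count_mask, ded_count_shift;
};

int main(void)
{
	const struct ras_field f = { 0x000000ff, 0, 0x0000ff00, 8 };
	uint32_t reg_value = 0x00000302; /* encodes 3 DED, 2 SEC */
	uint32_t sec = (reg_value & f.sec_count_mask) >> f.sec_count_shift;
	uint32_t ded = (reg_value & f.ded_count_mask) >> f.ded_count_shift;

	assert(sec == 2 && ded == 3);
	return 0;
}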
+
+static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+					  void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i, j, k;
+	uint32_t reg_value;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0, k);
+				reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+				if (reg_value)
+					gfx_v9_4_ras_error_count(adev,
+						&gfx_v9_4_edc_counter_regs[i],
+						j, k, reg_value, &sec_count,
+						&ded_count);
+			}
+		}
+	}
+
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
+
+	gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	gfx_v9_4_query_utc_edc_status(adev, err_data);
+
+	return 0;
+}
+
+static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
+		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
+			for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
+			     k++) {
+				gfx_v9_4_select_se_sh(adev, j, 0x0, k);
+				RREG32(SOC15_REG_ENTRY_OFFSET(
+					gfx_v9_4_edc_counter_regs[i]));
+			}
+		}
+	}
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
+
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
+
+	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+		WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
+		WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
+		RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
+		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
+	}
+
+	WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
+	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
+}
+
+static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+				     void *inject_if)
+{
+	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+	int ret;
+	struct ta_ras_trigger_error_input block_info = { 0 };
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return -EINVAL;
+
+	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+	block_info.sub_block_index = info->head.sub_block_index;
+	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+	block_info.address = info->address;
+	block_info.value = info->value;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	ret = psp_ras_trigger_error(&adev->psp, &block_info);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return ret;
+}
+
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
+	{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
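The { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 } entry just defined encodes one shader engine (se_num = 1) and 32 GCEA instances, so the query function below performs 1 x 32 = 32 selected register reads. A standalone sketch of how those two fields drive the loop, with select_se_sh()/read_reg() as hypothetical stand-ins for gfx_v9_4_select_se_sh()/RREG32():

/* Standalone sketch: the se_num/instance fields of a soc15_reg_entry
 * determine how many (se, instance) pairs get read. */
#include <assert.h>
#include <stdint.h>

struct reg_entry { uint32_t se_num, instance; };

static unsigned int reads;
static void select_se_sh(uint32_t se, uint32_t sh, uint32_t inst)
{
	(void)se; (void)sh; (void)inst; /* would program GRBM_GFX_INDEX */
}
static uint32_t read_reg(void) { reads++; return 0; }

int main(void)
{
	const struct reg_entry ea = { 1, 32 }; /* like gfx_v9_4_ea_err_status_regs */
	uint32_t i, j;

	for (i = 0; i < ea.se_num; i++)
		for (j = 0; j < ea.instance; j++) {
			select_se_sh(i, 0, j);
			(void)read_reg();
		}
	assert(reads == 32);
	return 0;
}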
+
+static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+	uint32_t i, j;
+	uint32_t reg_value;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) {
+		for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance;
+		     j++) {
+			gfx_v9_4_select_se_sh(adev, i, 0, j);
+			reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+				gfx_v9_4_ea_err_status_regs));
+			if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+				/* An SDP read/write error or parity error in FUE_IS_FATAL mode
+				 * can cause a system fatal error on Arcturus. Harvest the error
+				 * status before GPU reset */
+				dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+					 j, reg_value);
+			}
+		}
+	}
+
+	gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
+	.ras_late_init = amdgpu_gfx_ras_late_init,
+	.ras_fini = amdgpu_gfx_ras_fini,
+	.ras_error_inject = &gfx_v9_4_ras_error_inject,
+	.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+	.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+	.query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
new file mode 100644
index 000000000000..bdd16b568021
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_H__
+#define __GFX_V9_4_H__
+
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
+
+#endif /* __GFX_V9_4_H__ */
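The header's only export is the gfx_v9_4_ras_funcs ops table defined above; the gfx v9 core dispatches RAS work through such a table rather than calling the ASIC-specific functions directly (the hook-up site is outside this hunk). A standalone, purely illustrative sketch of the function-pointer-table pattern, with made-up names:

/* Hypothetical, self-contained illustration of ops-table dispatch as used
 * for amdgpu_gfx_ras_funcs; every name here is made up. */
#include <stdio.h>

struct ras_funcs {
	int (*query_error_count)(void *dev, void *err_data);
	void (*reset_error_count)(void *dev);
};

static int fake_query(void *dev, void *err_data)
{
	(void)dev; (void)err_data;
	printf("query called through the ops table\n");
	return 0;
}

static void fake_reset(void *dev) { (void)dev; }

static const struct ras_funcs my_ras_funcs = {
	.query_error_count = fake_query,
	.reset_error_count = fake_reset,
};

int main(void)
{
	const struct ras_funcs *funcs = &my_ras_funcs; /* cf. a per-ASIC table */
	return funcs->query_error_count(NULL, NULL);
}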
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
new file mode 100644
index 000000000000..c4f37a161875
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -0,0 +1,1943 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15d.h"
+
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+#include "gfx_v9_0.h"
+
+#include "gfx_v9_4_2.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_gfx.h"
+
+#define SE_ID_MAX 8
+#define CU_ID_MAX 16
+#define SIMD_ID_MAX 4
+#define WAVE_ID_MAX 10
+
+enum gfx_v9_4_2_utc_type {
+	VML2_MEM,
+	VML2_WALKER_MEM,
+	UTCL2_MEM,
+	ATC_L2_CACHE_2M,
+	ATC_L2_CACHE_32K,
+	ATC_L2_CACHE_4K
+};
+
+struct gfx_v9_4_2_utc_block {
+	enum gfx_v9_4_2_utc_type type;
+	uint32_t num_banks;
+	uint32_t num_ways;
+	uint32_t num_mem_blocks;
+	struct soc15_reg idx_reg;
+	struct soc15_reg data_reg;
+	uint32_t sec_count_mask;
+	uint32_t sec_count_shift;
+	uint32_t ded_count_mask;
+	uint32_t ded_count_shift;
+	uint32_t clear;
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x141dc920),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0x3b458b93),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x1a4f5583),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0x317717f6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x107cc1e6),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x351),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x2591aa38),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0xac9e88b),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x2bc3369b),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0xfb74ee),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x21f0a2fe),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x49),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_UTCL1_CNTL1, 0xffffffff, 0x30800400),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
+};
+
+/*
+ * This shader is used to clear VGPRs and LDS, and also to write the input
+ * pattern into the write-back buffer, which will be used by the driver to
+ * check whether all SIMDs have been covered.
+*/ +static const u32 vgpr_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xd3d94000, + 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, 0xd3d94003, + 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, 0xd3d94006, + 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, 0xd3d94009, + 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, 0xd3d9400c, + 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, 0xd3d9400f, + 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, 0xd3d94012, + 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, 0xd3d94015, + 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, 0xd3d94018, + 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, 0xd3d9401b, + 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, 0xd3d9401e, + 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, 0xd3d94021, + 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, 0xd3d94024, + 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, 0xd3d94027, + 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, 0xd3d9402a, + 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, 0xd3d9402d, + 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, 0xd3d94030, + 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, 0xd3d94033, + 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, 0xd3d94036, + 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, 0xd3d94039, + 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, 0xd3d9403c, + 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, 0xd3d9403f, + 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, 0xd3d94042, + 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, 0xd3d94045, + 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, 0xd3d94048, + 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, 0xd3d9404b, + 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, 0xd3d9404e, + 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080, 0xd3d94051, + 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, 0xd3d94054, + 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, 0xd3d94057, + 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, 0xd3d9405a, + 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, 0xd3d9405d, + 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, 0xd3d94060, + 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, 0xd3d94063, + 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, 0xd3d94066, + 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, 0xd3d94069, + 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, 0xd3d9406c, + 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, 0xd3d9406f, + 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, 0xd3d94072, + 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, 0xd3d94075, + 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, 0xd3d94078, + 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, 0xd3d9407b, + 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, 0xd3d9407e, + 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, 0xd3d94081, + 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, 0xd3d94084, + 0x18000080, 
0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, 0xd3d94087, + 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, 0xd3d9408a, + 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, 0xd3d9408d, + 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, 0xd3d94090, + 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, 0xd3d94093, + 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, 0xd3d94096, + 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, 0xd3d94099, + 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, 0xd3d9409c, + 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, 0xd3d9409f, + 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, 0xd3d940a2, + 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, 0xd3d940a5, + 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, 0xd3d940a8, + 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, 0xd3d940ab, + 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, 0xd3d940ae, + 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, 0xd3d940b1, + 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, 0xd3d940b4, + 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, 0xd3d940b7, + 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, 0xd3d940ba, + 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, 0xd3d940bd, + 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, 0xd3d940c0, + 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, 0xd3d940c3, + 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, 0xd3d940c6, + 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, 0xd3d940c9, + 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, 0xd3d940cc, + 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, 0xd3d940cf, + 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, 0xd3d940d2, + 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, 0xd3d940d5, + 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, 0xd3d940d8, + 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, 0xd3d940db, + 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, 0xd3d940de, + 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080, 0xd3d940e1, + 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, 0xd3d940e4, + 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, 0xd3d940e7, + 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, 0xd3d940ea, + 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, 0xd3d940ed, + 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, 0xd3d940f0, + 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, 0xd3d940f3, + 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, 0xd3d940f6, + 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, 0xd3d940f9, + 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, 0xd3d940fc, + 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, 0xd3d940ff, + 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, 0x7e000280, + 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, 0x7e0c0280, + 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, 0xd28c0001, + 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xbe8b0004, 0xb78b4000, + 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, 0x00020201, + 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, 0xbf84fff8, + 0xbf810000, +}; + +const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = { + { 
SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0xbf }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x400006 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x3F }, /* 63 - accum-offset = 256 */ + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +/* + * The below shaders are used to clear the SGPRs, and also to write the input + * pattern into the write-back buffer. The first two dispatches should be + * scheduled simultaneously, which makes sure that all SGPRs can be allocated; + * dispatch 1 therefore needs to check the write-back buffer before it is + * scheduled, to make sure that the waves of dispatch 0 have all been + * dispatched to all SIMDs in a balanced way. Both dispatch 0 and dispatch 1 + * should be halted until all waves are dispatched, and then the driver + * writes a pattern to the shared memory to make all waves continue. +*/ +static const u32 sgpr112_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080, + 0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080, + 0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080, + 0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080, + 0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080, + 0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080, + 0xbeda0080, 0xbedb0080, 0xbedc0080, 0xbedd0080, 0xbede0080, 0xbedf0080, + 0xbee00080, 0xbee10080, 0xbee20080, 0xbee30080, 0xbee40080, 0xbee50080, + 0xbf810000 +}; + +const struct soc15_reg_entry sgpr112_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + {
SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 8 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x340 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +static const u32 sgpr96_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080, + 0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080, + 0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080, + 0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080, + 0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080, + 0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080, + 0xbf810000, +}; + +const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0xc }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x2c0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 
0xffffffff }, +}; + +/* + * This shader is used to clear the uninitialized SGPRs after the above + * two dispatches; because of a hardware feature, dispatch 0 can't clear + * the top-hole SGPRs. Therefore, 4 waves per SIMD are needed to cover + * these SGPRs. +*/ +static const u32 sgpr64_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbf810000, +}; + +const struct soc15_reg_entry sgpr64_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0x10 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x1c0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ib, + const u32 *shader_ptr, u32 shader_size, + const struct soc15_reg_entry *init_regs, u32 regs_size, + u32 compute_dim_x, u64 wb_gpu_addr, u32 pattern, + struct dma_fence **fence_ptr) +{ + int r, i; + uint32_t total_size, shader_offset; + u64 gpu_addr; + + total_size = (regs_size * 3 + 4 + 5 + 5) * 4; + total_size = ALIGN(total_size, 256); + shader_offset = total_size; + total_size += ALIGN(shader_size, 256); + + /* allocate an indirect buffer to put the commands in */ + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d).\n", r); + return r; + } + + /* load the compute shaders */ + for (i = 0; i < shader_size/sizeof(u32); i++) + ib->ptr[i + (shader_offset / 4)] = shader_ptr[i]; + + /* init the ib length to 0 */ + ib->length_dw = 0; + + /* write the register state for the compute dispatch */ + for (i = 0; i
< regs_size; i++) { + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(init_regs[i]) + - PACKET3_SET_SH_REG_START; + ib->ptr[ib->length_dw++] = init_regs[i].reg_value; + } + + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib->gpu_addr + (u64)shader_offset) >> 8; + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_PGM_LO) + - PACKET3_SET_SH_REG_START; + ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr); + ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr); + + /* write the wb buffer address */ + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 3); + ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_USER_DATA_0) + - PACKET3_SET_SH_REG_START; + ib->ptr[ib->length_dw++] = lower_32_bits(wb_gpu_addr); + ib->ptr[ib->length_dw++] = upper_32_bits(wb_gpu_addr); + ib->ptr[ib->length_dw++] = pattern; + + /* write dispatch packet */ + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib->ptr[ib->length_dw++] = compute_dim_x; /* x */ + ib->ptr[ib->length_dw++] = 1; /* y */ + ib->ptr[ib->length_dw++] = 1; /* z */ + ib->ptr[ib->length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* schedule the ib on the ring */ + r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr); + if (r) { + dev_err(adev->dev, "ib submit failed (%d).\n", r); + amdgpu_ib_free(adev, ib, NULL); + } + return r; +} + +static void gfx_v9_4_2_log_wave_assignment(struct amdgpu_device *adev, uint32_t *wb_ptr) +{ + uint32_t se, cu, simd, wave; + uint32_t offset = 0; + char *str; + int size; + + str = kmalloc(256, GFP_KERNEL); + if (!str) + return; + + dev_dbg(adev->dev, "wave assignment:\n"); + + for (se = 0; se < adev->gfx.config.max_shader_engines; se++) { + for (cu = 0; cu < CU_ID_MAX; cu++) { + memset(str, 0, 256); + size = sprintf(str, "SE[%02d]CU[%02d]: ", se, cu); + for (simd = 0; simd < SIMD_ID_MAX; simd++) { + size += sprintf(str + size, "["); + for (wave = 0; wave < WAVE_ID_MAX; wave++) { + size += sprintf(str + size, "%x", wb_ptr[offset]); + offset++; + } + size += sprintf(str + size, "] "); + } + dev_dbg(adev->dev, "%s\n", str); + } + } + + kfree(str); +} + +static int gfx_v9_4_2_wait_for_waves_assigned(struct amdgpu_device *adev, + uint32_t *wb_ptr, uint32_t mask, + uint32_t pattern, uint32_t num_wave, bool wait) +{ + uint32_t se, cu, simd, wave; + uint32_t loop = 0; + uint32_t wave_cnt; + uint32_t offset; + + do { + wave_cnt = 0; + offset = 0; + + for (se = 0; se < adev->gfx.config.max_shader_engines; se++) + for (cu = 0; cu < CU_ID_MAX; cu++) + for (simd = 0; simd < SIMD_ID_MAX; simd++) + for (wave = 0; wave < WAVE_ID_MAX; wave++) { + if (((1 << wave) & mask) && + (wb_ptr[offset] == pattern)) + wave_cnt++; + + offset++; + } + + if (wave_cnt == num_wave) + return 0; + + mdelay(1); + } while (++loop < 2000 && wait); + + dev_err(adev->dev, "actual wave num: %d, expected wave num: %d\n", + wave_cnt, num_wave); + + gfx_v9_4_2_log_wave_assignment(adev, wb_ptr); + + return -EBADSLT; +} + +static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev) +{ + int r; + int wb_size = adev->gfx.config.max_shader_engines * + CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX; + struct amdgpu_ib wb_ib; + struct amdgpu_ib disp_ibs[3]; + struct dma_fence *fences[3]; + u32 pattern[3] = { 0x1, 0x5, 0xa }; + + /* bail if the compute ring is not ready */ + if (!adev->gfx.compute_ring[0].sched.ready || +
!adev->gfx.compute_ring[1].sched.ready) + return 0; + + /* allocate the write-back buffer from IB */ + memset(&wb_ib, 0, sizeof(wb_ib)); + r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t), + AMDGPU_IB_POOL_DIRECT, &wb_ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d) for wb\n", r); + return r; + } + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ibs[0], + sgpr112_init_compute_shader_aldebaran, + sizeof(sgpr112_init_compute_shader_aldebaran), + sgpr112_init_regs_aldebaran, + ARRAY_SIZE(sgpr112_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern[0], &fences[0]); + if (r) { + dev_err(adev->dev, "failed to clear first 224 sgprs\n"); + goto pro_end; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b11, + pattern[0], + adev->gfx.cu_info.number * SIMD_ID_MAX * 2, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 224 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp0_failed; + } + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[1], + &disp_ibs[1], + sgpr96_init_compute_shader_aldebaran, + sizeof(sgpr96_init_compute_shader_aldebaran), + sgpr96_init_regs_aldebaran, + ARRAY_SIZE(sgpr96_init_regs_aldebaran), + adev->gfx.cu_info.number * 2, + wb_ib.gpu_addr, pattern[1], &fences[1]); + if (r) { + dev_err(adev->dev, "failed to clear next 576 sgprs\n"); + goto disp0_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b11111100, + pattern[1], adev->gfx.cu_info.number * SIMD_ID_MAX * 6, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 576 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp1_failed; + } + + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + + /* wait for the GPU to finish processing the IB */ + r = dma_fence_wait(fences[0], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 224 sgprs\n"); + goto disp1_failed; + } + + r = dma_fence_wait(fences[1], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 576 sgprs\n"); + goto disp1_failed; + } + + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ibs[2], + sgpr64_init_compute_shader_aldebaran, + sizeof(sgpr64_init_compute_shader_aldebaran), + sgpr64_init_regs_aldebaran, + ARRAY_SIZE(sgpr64_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern[2], &fences[2]); + if (r) { + dev_err(adev->dev, "failed to clear first 256 sgprs\n"); + goto disp1_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b1111, + pattern[2], + adev->gfx.cu_info.number * SIMD_ID_MAX * 4, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 256 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp2_failed; + } + + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + + r = dma_fence_wait(fences[2], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 256 sgprs\n"); + goto disp2_failed; + } + +disp2_failed: + amdgpu_ib_free(adev, &disp_ibs[2], NULL); + dma_fence_put(fences[2]); +disp1_failed: + amdgpu_ib_free(adev, &disp_ibs[1], NULL); + dma_fence_put(fences[1]); +disp0_failed: + amdgpu_ib_free(adev, &disp_ibs[0], NULL); + dma_fence_put(fences[0]); +pro_end: + amdgpu_ib_free(adev, &wb_ib, NULL); + + if (r) + dev_info(adev->dev, "Init SGPRS Failed\n"); + else + dev_info(adev->dev, "Init SGPRS Successfully\n"); 
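+ /* Note: on success, execution falls through the disp2_failed/disp1_failed/ + * disp0_failed/pro_end labels above as well, so the three dispatch IBs, + * their fences and the write-back IB are released on both the success and + * the error paths; r is 0 on success and carries the last error otherwise. */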
+ + return r; +} + +static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev) +{ + int r; + /* CU_ID: 0~15, SIMD_ID: 0~3, WAVE_ID: 0 ~ 9 */ + int wb_size = adev->gfx.config.max_shader_engines * + CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX; + struct amdgpu_ib wb_ib; + struct amdgpu_ib disp_ib; + struct dma_fence *fence; + u32 pattern = 0xa; + + /* bail if the compute ring is not ready */ + if (!adev->gfx.compute_ring[0].sched.ready) + return 0; + + /* allocate the write-back buffer from IB */ + memset(&wb_ib, 0, sizeof(wb_ib)); + r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t), + AMDGPU_IB_POOL_DIRECT, &wb_ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d) for wb.\n", r); + return r; + } + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ib, + vgpr_init_compute_shader_aldebaran, + sizeof(vgpr_init_compute_shader_aldebaran), + vgpr_init_regs_aldebaran, + ARRAY_SIZE(vgpr_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern, &fence); + if (r) { + dev_err(adev->dev, "failed to clear vgprs\n"); + goto pro_end; + } + + /* wait for the GPU to finish processing the IB */ + r = dma_fence_wait(fence, false); + if (r) { + dev_err(adev->dev, "timeout to clear vgprs\n"); + goto disp_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b1, + pattern, + adev->gfx.cu_info.number * SIMD_ID_MAX, + false); + if (r) { + dev_err(adev->dev, "failed to cover all simds when clearing vgprs\n"); + goto disp_failed; + } + +disp_failed: + amdgpu_ib_free(adev, &disp_ib, NULL); + dma_fence_put(fence); +pro_end: + amdgpu_ib_free(adev, &wb_ib, NULL); + + if (r) + dev_info(adev->dev, "Init VGPRS Failed\n"); + else + dev_info(adev->dev, "Init VGPRS Successfully\n"); + + return r; +} + +int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev) +{ + /* only support when RAS is enabled */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return 0; + + /* Workaround for ALDEBARAN, skip GPRs init in GPU reset. + Will remove it once GPRs init algorithm works for all CU settings. 
*/ + if (amdgpu_in_reset(adev)) + return 0; + + gfx_v9_4_2_do_sgprs_init(adev); + + gfx_v9_4_2_do_vgprs_init(adev); + + return 0; +} + +static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev); +static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev); + +void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, + uint32_t die_id) +{ + soc15_program_register_sequence(adev, + golden_settings_gc_9_4_2_alde, + ARRAY_SIZE(golden_settings_gc_9_4_2_alde)); + + /* apply golden settings per die */ + switch (die_id) { + case 0: + soc15_program_register_sequence(adev, + golden_settings_gc_9_4_2_alde_die_0, + ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_0)); + break; + case 1: + soc15_program_register_sequence(adev, + golden_settings_gc_9_4_2_alde_die_1, + ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_1)); + break; + default: + dev_warn(adev->dev, + "invalid die id %d, ignore channel fabricid remap settings\n", + die_id); + break; + } + + return; +} + +void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, + uint32_t last_vmid) +{ + uint32_t data; + int i; + + mutex_lock(&adev->srbm_mutex); + + for (i = first_vmid; i < last_vmid; i++) { + data = 0; + soc15_grbm_select(adev, 0, 0, 0, i); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, + 0); + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data); + } + + soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) +{ + u32 tmp; + + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + + tmp = 0; + tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1); + WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL, tmp); + + tmp = 0; + tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1); + WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp); + + WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL); + tmp = 0; + tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12); + WREG32_SOC15(GC, 0, regGC_CAC_IND_DATA, tmp); +} + +static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] = { + /* CPF */ + { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), 0, 1, 1 }, + /* CPC */ + { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), 0, 1, 1 }, + /* GDS */ + { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), 0, 1, 1 }, + /* RLC */ + { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), 0, 1, 1 }, + /* SPI */ + { SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), 0, 8, 1 }, + /* SQC */ + { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), 0, 8, 7 }, + { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), 0, 8, 7 }, + { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), 0, 8, 7 }, + { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), 0, 8, 7 }, + /* SQ */ + { SOC15_REG_ENTRY(GC, 
0, regSQ_EDC_CNT), 0, 8, 14 }, + /* TCP */ + { SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), 0, 8, 14 }, + /* TCI */ + { SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), 0, 1, 69 }, + /* TCC */ + { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), 0, 1, 16 }, + /* TCA */ + { SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), 0, 1, 2 }, + /* TCX */ + { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), 0, 1, 2 }, + { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), 0, 1, 2 }, + /* TD */ + { SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), 0, 8, 14 }, + /* TA */ + { SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), 0, 8, 14 }, + /* GCEA */ + { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 1, 16 }, +}; + +static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data); +} + +static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] = { + /* CPF */ + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, + + /* CPC */ + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, + { "CPC_DC_RESTORE_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, + { "CPC_DC_RESTORE_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, + + /* GDS */ + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, 
DED) }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + { "GDS_ME0_GFXHP3D_PIX_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_PIX_DED) }, + { "GDS_ME0_GFXHP3D_VTX_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_VTX_DED) }, + { "GDS_ME0_CS_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_CS_DED) }, + { "GDS_ME0_GFXHP3D_GS_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_GS_DED) }, + { "GDS_ME1_PIPE0_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE0_DED) }, + { "GDS_ME1_PIPE1_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE1_DED) }, + { "GDS_ME1_PIPE2_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE2_DED) }, + { "GDS_ME1_PIPE3_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE3_DED) }, + { "GDS_ME2_PIPE0_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE0_DED) }, + { "GDS_ME2_PIPE1_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE1_DED) }, + { "GDS_ME2_PIPE2_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE2_DED) }, + { "GDS_ME2_PIPE3_DED", + SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0, + SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE3_DED) }, + + /* RLC */ + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, + { "RLCV_INSTR_RAM", 
SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, + { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, + + /* SPI */ + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, + + /* SQC - regSQC_EDC_CNT */ + { "SQC_DATA_CU0_WRITE_DATA_BUF", 
SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU3_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU3_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_DED_COUNT) }, + + /* SQC - regSQC_EDC_CNT2 */ + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT) }, + + /* SQC - regSQC_EDC_CNT3 */ + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, 
regSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT) }, + + /* SQC - regSQC_EDC_PARITY_CNT3 */ + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_DED_COUNT) }, + + /* SQ */ + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + + /* TCP */ + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", 
SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_DED_COUNT) }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + + /* TCI */ + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, + + /* TCC */ + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + 
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, + + /* TCA */ + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, + + /* TCX */ + { "TCX_GROUP0", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_SEC_COUNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_DED_COUNT) }, + { "TCX_GROUP1", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_SEC_COUNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_DED_COUNT) }, + { "TCX_GROUP2", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_SEC_COUNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_DED_COUNT) }, + { "TCX_GROUP3", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_SEC_COUNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_DED_COUNT) }, + { "TCX_GROUP4", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_SEC_COUNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_DED_COUNT) }, + { "TCX_GROUP5", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP5_SED_COUNT), 0, 0 }, + { "TCX_GROUP6", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP6_SED_COUNT), 0, 0 }, + { "TCX_GROUP7", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP7_SED_COUNT), 0, 0 }, + { "TCX_GROUP8", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP8_SED_COUNT), 0, 0 }, + { "TCX_GROUP9", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP9_SED_COUNT), 0, 0 }, + { "TCX_GROUP10", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), + SOC15_REG_FIELD(TCX_EDC_CNT, GROUP10_SED_COUNT), 0, 0 }, + { "TCX_GROUP11", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), + SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP11_SED_COUNT), 0, 0 }, + { "TCX_GROUP12", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), + SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP12_SED_COUNT), 0, 0 }, + { "TCX_GROUP13", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), + SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP13_SED_COUNT), 0, 0 }, + { "TCX_GROUP14", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), + SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP14_SED_COUNT), 0, 0 }, + + /* TD */ + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, + + /* TA */ + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO_LO", SOC15_REG_ENTRY(GC, 0, 
regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_DED_COUNT) }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, + { "TA_FS_AFIFO_HI", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_DED_COUNT) }, + + /* EA - regGCEA_EDC_CNT */ + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, + + /* EA - regGCEA_EDC_CNT2 */ + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + 
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, + { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, + + /* EA - regGCEA_EDC_CNT3 */ + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, +}; + +static const char * const vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_ARADDRS", + "UTC_VML2_RDIF_LOG_FIFO", + "UTC_VML2_QUEUE_REQ", + "UTC_VML2_QUEUE_RET", +}; + +static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = { + { VML2_MEM, 8, 2, 2, + { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_CNTL) }, + SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, SEC_COUNT), + SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, DED_COUNT), + REG_SET_FIELD(0, VML2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) }, + { VML2_WALKER_MEM, ARRAY_SIZE(vml2_walker_mems), 1, 1, + { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_CNTL) }, + SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT), + SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, DED_COUNT), + REG_SET_FIELD(0, VML2_WALKER_MEM_ECC_CNTL, WRITE_COUNTERS, 1) }, + { UTCL2_MEM, 18, 1, 2, + { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_CNTL) }, + SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, SEC_COUNT), + SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, DED_COUNT), + REG_SET_FIELD(0, UTCL2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) }, + { ATC_L2_CACHE_2M, 8, 2, 1, + { SOC15_REG_ENTRY(GC, 0, 
regATC_L2_CACHE_2M_DSM_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_CNTL) }, + SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT), + SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT), + REG_SET_FIELD(0, ATC_L2_CACHE_2M_DSM_CNTL, WRITE_COUNTERS, 1) }, + { ATC_L2_CACHE_32K, 8, 2, 2, + { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_CNTL) }, + SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, SEC_COUNT), + SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, DED_COUNT), + REG_SET_FIELD(0, ATC_L2_CACHE_32K_DSM_CNTL, WRITE_COUNTERS, 1) }, + { ATC_L2_CACHE_4K, 8, 2, 8, + { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_INDEX) }, + { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_CNTL) }, + SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT), + SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT), + REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) }, +}; + +static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = { + SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 +}; + +static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, + uint32_t value, uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_ras_fields); i++) { + if (gfx_v9_4_2_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_4_2_ras_fields[i].seg != reg->seg || + gfx_v9_4_2_ras_fields[i].inst != reg->inst) + continue; + + sec_cnt = SOC15_RAS_REG_FIELD_VAL( + value, gfx_v9_4_2_ras_fields[i], sec); + if (sec_cnt) { + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gfx_v9_4_2_ras_fields[i].name, se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = SOC15_RAS_REG_FIELD_VAL( + value, gfx_v9_4_2_ras_fields[i], ded); + if (ded_cnt) { + dev_info(adev->dev, + "GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gfx_v9_4_2_ras_fields[i].name, se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev, + uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i, j, k, data; + uint32_t sec_cnt = 0, ded_cnt = 0; + + if (sec_count && ded_count) { + *sec_count = 0; + *ded_count = 0; + } + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_2_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_2_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_2_select_se_sh(adev, j, 0, k); + + /* if sec/ded_count is null, just clear counter */ + if (!sec_count || !ded_count) { + WREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_edc_counter_regs[i]), 0); + continue; + } + + data = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_edc_counter_regs[i])); + + if (!data) + continue; + + gfx_v9_4_2_get_reg_error_count(adev, + &gfx_v9_4_2_edc_counter_regs[i], + j, k, data, &sec_cnt, &ded_cnt); + + /* clear counter after read */ + WREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_edc_counter_regs[i]), 0); + } + } + } + + if (sec_count && ded_count) { + *sec_count += sec_cnt; + *ded_count += ded_cnt; + } + + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev, + struct gfx_v9_4_2_utc_block *blk, + uint32_t instance, uint32_t sec_cnt, + uint32_t ded_cnt) +{ + uint32_t bank, way, mem; + 
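+ /* Decode the flat ECC instance index into (bank, way, mem) below: mem + * varies fastest, i.e. instance == (bank * num_ways + way) * num_mem_blocks + * + mem, matching the linear walk in gfx_v9_4_2_query_utc_edc_count(). */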
static const char *vml2_way_str[] = { "BIGK", "4K" }; + static const char *utcl2_router_str[] = { "VMC", "APT" }; + + mem = instance % blk->num_mem_blocks; + way = (instance / blk->num_mem_blocks) % blk->num_ways; + bank = instance / (blk->num_mem_blocks * blk->num_ways); + + switch (blk->type) { + case VML2_MEM: + dev_info( + adev->dev, + "GFX SubBlock UTC_VML2_BANK_CACHE_%d_%s_MEM%d, SED %d, DED %d\n", + bank, vml2_way_str[way], mem, sec_cnt, ded_cnt); + break; + case VML2_WALKER_MEM: + dev_info(adev->dev, "GFX SubBlock %s, SED %d, DED %d\n", + vml2_walker_mems[bank], sec_cnt, ded_cnt); + break; + case UTCL2_MEM: + dev_info( + adev->dev, + "GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n", + bank, utcl2_router_str[mem], sec_cnt, ded_cnt); + break; + case ATC_L2_CACHE_2M: + dev_info( + adev->dev, + "GFX SubBlock UTC_ATCL2_CACHE_2M_BANK%d_WAY%d_MEM, SED %d, DED %d\n", + bank, way, sec_cnt, ded_cnt); + break; + case ATC_L2_CACHE_32K: + dev_info( + adev->dev, + "GFX SubBlock UTC_ATCL2_CACHE_32K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n", + bank, way, mem, sec_cnt, ded_cnt); + break; + case ATC_L2_CACHE_4K: + dev_info( + adev->dev, + "GFX SubBlock UTC_ATCL2_CACHE_4K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n", + bank, way, mem, sec_cnt, ded_cnt); + break; + } +} + +static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev, + uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i, j, data; + uint32_t sec_cnt, ded_cnt; + uint32_t num_instances; + struct gfx_v9_4_2_utc_block *blk; + + if (sec_count && ded_count) { + *sec_count = 0; + *ded_count = 0; + } + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_utc_blocks); i++) { + blk = &gfx_v9_4_2_utc_blocks[i]; + num_instances = + blk->num_banks * blk->num_ways * blk->num_mem_blocks; + for (j = 0; j < num_instances; j++) { + WREG32(SOC15_REG_ENTRY_OFFSET(blk->idx_reg), j); + + /* if sec/ded_count is NULL, just clear counter */ + if (!sec_count || !ded_count) { + WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg), + blk->clear); + continue; + } + + data = RREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg)); + if (!data) + continue; + + sec_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, sec); + *sec_count += sec_cnt; + ded_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, ded); + *ded_count += ded_cnt; + + /* clear counter after read */ + WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg), + blk->clear); + + /* print the edc count */ + if (sec_cnt || ded_cnt) + gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt, + ded_cnt); + } + } + + return 0; +} + +static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + gfx_v9_4_2_query_sram_edc_count(adev, &sec_count, &ded_count); + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; + + gfx_v9_4_2_query_utc_edc_count(adev, &sec_count, &ded_count); + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; + + return 0; +} + +static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev) +{ + WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3); + WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3); + WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3); +} + +static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev) +{ + uint32_t i, j; + uint32_t value; + + mutex_lock(&adev->grbm_idx_mutex); +
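	/*
+	 * GCEA_ERR_STATUS is instanced per (SE, EA instance): each copy has
+	 * to be selected through GRBM before it can be accessed, which is
+	 * why the walk below holds grbm_idx_mutex and restores broadcast
+	 * mode (0xffffffff) when done.
+	 */
+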
for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) { + for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance; + j++) { + gfx_v9_4_2_select_se_sh(adev, i, 0, j); + value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_ea_err_status_regs)); + value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1); + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value); + } + } + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + + gfx_v9_4_2_query_sram_edc_count(adev, NULL, NULL); + gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); +} + +static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +{ + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; + int ret; + struct ta_ras_trigger_error_input block_info = { 0 }; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); + block_info.sub_block_index = info->head.sub_block_index; + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); + block_info.address = info->address; + block_info.value = info->value; + + mutex_lock(&adev->grbm_idx_mutex); + ret = psp_ras_trigger_error(&adev->psp, &block_info); + mutex_unlock(&adev->grbm_idx_mutex); + + return ret; +} + +static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev) +{ + uint32_t i, j; + uint32_t reg_value; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) { + for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance; + j++) { + gfx_v9_4_2_select_se_sh(adev, i, 0, j); + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_ea_err_status_regs)); + + if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { + dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n", + j, reg_value); + } + /* clear after read */ + reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x1); + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value); + } + } + + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3); + } + + data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3); + } + + data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS); + if (data) { + dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); + WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3); + } +} + +static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + + gfx_v9_4_2_query_ea_err_status(adev); + gfx_v9_4_2_query_utc_err_status(adev); + gfx_v9_4_2_query_sq_timeout_status(adev); +} + +static void 
gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return; + + gfx_v9_4_2_reset_utc_err_status(adev); + gfx_v9_4_2_reset_ea_err_status(adev); + gfx_v9_4_2_reset_sq_timeout_status(adev); +} + +static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev) +{ + uint32_t i; + uint32_t data; + + data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, + amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : + 0); + + if (amdgpu_watchdog_timer.timeout_fatal_disable && + (amdgpu_watchdog_timer.period < 1 || + amdgpu_watchdog_timer.period > 0x23)) { + dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n"); + amdgpu_watchdog_timer.period = 0x23; + } + data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, + amdgpu_watchdog_timer.period); + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + gfx_v9_4_2_select_se_sh(adev, i, 0xffffffff, 0xffffffff); + WREG32_SOC15(GC, 0, regSQ_TIMEOUT_CONFIG, data); + } + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) +{ + WREG32_SOC15_RLC_EX(reg, GC, 0, regSQ_IND_INDEX, + (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | + (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | + (address << SQ_IND_INDEX__INDEX__SHIFT) | + (SQ_IND_INDEX__FORCE_READ_MASK)); + return RREG32_SOC15(GC, 0, regSQ_IND_DATA); +} + +static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev, + uint32_t status) +{ + struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + uint32_t i, simd, wave; + uint32_t wave_status; + uint32_t wave_pc_lo, wave_pc_hi; + uint32_t wave_exec_lo, wave_exec_hi; + uint32_t wave_inst_dw0, wave_inst_dw1; + uint32_t wave_ib_sts; + + for (i = 0; i < 32; i++) { + /* one SQ_TIMEOUT_STATUS bit per wave slot */ + if (!((1 << i) & status)) + continue; + + simd = i / cu_info->max_waves_per_simd; + wave = i % cu_info->max_waves_per_simd; + + wave_status = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); + wave_pc_lo = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); + wave_pc_hi = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); + wave_exec_lo = + wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); + wave_exec_hi = + wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); + wave_inst_dw0 = + wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); + wave_inst_dw1 = + wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); + wave_ib_sts = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); + + dev_info( + adev->dev, + "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n", + simd, wave, wave_status, + ((uint64_t)wave_pc_hi << 32 | wave_pc_lo), + ((uint64_t)wave_exec_hi << 32 | wave_exec_lo), + ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0), + wave_ib_sts); + } +} + +static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev) +{ + uint32_t se_idx, sh_idx, cu_idx; + uint32_t status; + + mutex_lock(&adev->grbm_idx_mutex); + for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; + se_idx++) { + for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; + sh_idx++) { + for (cu_idx = 0; + cu_idx < adev->gfx.config.max_cu_per_sh; + cu_idx++) { + gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx, + cu_idx); + status = RREG32_SOC15(GC, 0, + regSQ_TIMEOUT_STATUS); + if (status != 0) { + dev_info( + adev->dev, + "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n", + se_idx, sh_idx, cu_idx); +
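					/*
+					 * Dump status, PC, EXEC mask,
+					 * current instruction and IB state
+					 * for every wave slot flagged in
+					 * SQ_TIMEOUT_STATUS before the
+					 * register is cleared below.
+					 */
+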
gfx_v9_4_2_log_cu_timeout_status( + adev, status); + } + /* clear old status */ + WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0); + } + } + } + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) +{ + uint32_t se_idx, sh_idx, cu_idx; + + mutex_lock(&adev->grbm_idx_mutex); + for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; + se_idx++) { + for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; + sh_idx++) { + for (cu_idx = 0; + cu_idx < adev->gfx.config.max_cu_per_sh; + cu_idx++) { + gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx, + cu_idx); + WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0); + } + } + } + gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = { + .ras_late_init = amdgpu_gfx_ras_late_init, + .ras_fini = amdgpu_gfx_ras_fini, + .ras_error_inject = &gfx_v9_4_2_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, + .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, + .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h new file mode 100644 index 000000000000..6db1f88509af --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __GFX_V9_4_2_H__ +#define __GFX_V9_4_2_H__ + +void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, uint32_t last_vmid); +void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, + uint32_t die_id); +void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); +int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); + +extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs; + +#endif /* __GFX_V9_4_2_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index e91bd7945777..bda1542ef1dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -22,6 +22,7 @@ */ #include "amdgpu.h" #include "gfxhub_v1_0.h" +#include "gfxhub_v1_1.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" @@ -30,40 +31,61 @@ #include "soc15_common.h" -u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) +static u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) { return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24; } -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base) +static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base) { - /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ - int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - offset * vmid, lower_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - offset * vmid, upper_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); } static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + uint64_t pt_base; - gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); + if (adev->gmc.pdb0_bo) + pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); + else + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.gart_start >> 44)); + gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->gmc.gart_end >> 44)); + /* If the GART is used for FB translation, the vmid0 page table + * covers both vram and system memory (gart) + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); + + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } else { + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, +
(u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } } static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) @@ -75,40 +97,56 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - /* Program the system aperture low logical page number. */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + /* + * Raven2 has a HW issue that it is unable to use the + * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. + * So here is the workaround that increases the system + * aperture high address (add 1) to get rid of the VM + * fault and hardware hang. + */ + WREG32_SOC15_RLC(GC, 0, + mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max((adev->gmc.fb_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); + else + WREG32_SOC15_RLC( + GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + } - if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) - /* - * Raven2 has a HW issue that it is unable to use the vram which - * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the - * workaround that increase system aperture high address (add 1) - * to get rid of the VM fault and hardware hang. - */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max((adev->gmc.fb_end >> 18) + 0x1, - adev->gmc.agp_end >> 18)); - else - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); - - /* Program "protection fault". */ - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - (u32)((u64)adev->dummy_page_addr >> 44)); - - WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + /* In the case of squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them.
+ */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } } static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) @@ -166,8 +204,13 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp); } @@ -177,7 +220,10 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); - tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp); @@ -202,6 +248,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -236,68 +283,70 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, block_size); - /* Send no-retry XNACK on fault to suppress VM fault storm. */ + /* Send no-retry XNACK on fault to suppress VM fault storm. + * On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. 
+ */ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !amdgpu_noretry); - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp); - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + !adev->gmc.noretry || + adev->asic_type == CHIP_ALDEBARAN); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); } } static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; unsigned i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - 2 * i, 0xffffffff); + i * hub->eng_addr_distance, 0xffffffff); WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - 2 * i, 0x1f); + i * hub->eng_addr_distance, 0x1f); } } -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) +static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
*/ gfxhub_v1_0_init_gart_aperture_regs(adev); gfxhub_v1_0_init_system_aperture_regs(adev); gfxhub_v1_0_init_tlb_regs(adev); - gfxhub_v1_0_init_cache_regs(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_0_init_cache_regs(adev); gfxhub_v1_0_enable_system_domain(adev); - gfxhub_v1_0_disable_identity_aperture(adev); + if (!amdgpu_sriov_vf(adev)) + gfxhub_v1_0_disable_identity_aperture(adev); gfxhub_v1_0_setup_vmid_config(adev); gfxhub_v1_0_program_invalidation(adev); return 0; } -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) +static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; u32 tmp; u32 i; /* Disable all tables */ for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); /* Setup TLB control */ tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL); @@ -319,8 +368,8 @@ void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value) +static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, + bool value) { u32 tmp; tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); @@ -353,11 +402,11 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, CRASH_ON_NO_RETRY_FAULT, 1); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, CRASH_ON_RETRY_FAULT, 1); - } + } WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void gfxhub_v1_0_init(struct amdgpu_device *adev) +static void gfxhub_v1_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; @@ -379,4 +428,22 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS); hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } + + +const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { + .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset, + .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs, + .gart_enable = gfxhub_v1_0_gart_enable, + .gart_disable = gfxhub_v1_0_gart_disable, + .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default, + .init = gfxhub_v1_0_init, + .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index 92d3a70cd9b1..3174bc5766fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h @@ -24,13 +24,6 @@ #ifndef __GFXHUB_V1_0_H__ #define __GFXHUB_V1_0_H__ -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev); -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev); -void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void gfxhub_v1_0_init(struct amdgpu_device *adev); -u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); +extern const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs; 
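+
+/*
+ * Minimal usage sketch (an assumption for illustration, not part of this
+ * patch): with the per-function exports gone, a GMC block is expected to
+ * install this table during early init and reach the hub only through the
+ * funcs pointer, roughly:
+ *
+ *	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
+ *	adev->gfxhub.funcs->init(adev);
+ */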
#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index c0ab71df0d90..497b86c376c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -28,13 +28,42 @@ #include "soc15_common.h" +#define mmMC_VM_XGMI_LFB_CNTL_ALDE 0x0978 +#define mmMC_VM_XGMI_LFB_CNTL_ALDE_BASE_IDX 0 +#define mmMC_VM_XGMI_LFB_SIZE_ALDE 0x0979 +#define mmMC_VM_XGMI_LFB_SIZE_ALDE_BASE_IDX 0 +//MC_VM_XGMI_LFB_CNTL +#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION__SHIFT 0x0 +#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION__SHIFT 0x4 +#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION_MASK 0x0000000FL +#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION_MASK 0x000000F0L +//MC_VM_XGMI_LFB_SIZE +#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE__SHIFT 0x0 +#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE_MASK 0x0001FFFFL + int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) { - u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); - u32 max_region = + u32 max_num_physical_nodes; + u32 max_physical_node_id; + u32 xgmi_lfb_cntl; + u32 max_region; + u64 seg_size; + + if (adev->asic_type == CHIP_ALDEBARAN) { + xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL_ALDE); + seg_size = REG_GET_FIELD( + RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE), + MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + } else { + xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); + seg_size = REG_GET_FIELD( + RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), + MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + } + + max_region = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); - u32 max_num_physical_nodes = 0; - u32 max_physical_node_id = 0; + switch (adev->asic_type) { case CHIP_VEGA20: @@ -45,23 +74,29 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) max_num_physical_nodes = 8; max_physical_node_id = 7; break; + case CHIP_ALDEBARAN: + max_num_physical_nodes = 16; + max_physical_node_id = 15; + break; default: return -EINVAL; } /* PF_MAX_REGION=0 means xgmi is disabled */ - if (max_region) { + if (max_region || adev->gmc.xgmi.connected_to_cpu) { adev->gmc.xgmi.num_physical_nodes = max_region + 1; + if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; adev->gmc.xgmi.physical_node_id = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, + PF_LFB_REGION); + if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; - adev->gmc.xgmi.node_segment_size = REG_GET_FIELD( - RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), - MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + + adev->gmc.xgmi.node_segment_size = seg_size; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index b70c7b483c24..14c1c1a297dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -31,7 +31,78 @@ #include "soc15_common.h" -u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev) +static const char *gfxhub_client_ids[] = { + "CB/DB", + "Reserved", + "GE1", + "GE2", + "CPF", + "CPC", + "CPG", + "RLC", + "TCP", + "SQC (inst)", + "SQC (data)", + "SQG", + "Reserved", + "SDMA0", + "SDMA1", + "GCR", + "SDMA2", + "SDMA3", +}; + +static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << 
vmid); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + u32 cid = REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, CID); + + dev_err(adev->dev, + "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid], + cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, RW)); +} + +static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE); @@ -41,23 +112,23 @@ u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev) return base; } -u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) +static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) { return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24; } -void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, +static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - /* two registers distance between mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */ - int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - offset * vmid, lower_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - offset * vmid, upper_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); } static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) @@ -81,24 +152,25 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - - /* Set default page address. 
*/ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); + if (!amdgpu_sriov_vf(adev)) { + /* Program the AGP BAR */ + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + } /* Program "protection fault". */ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, @@ -135,6 +207,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These regs are not accessible for VF, PF will program these in SRIOV */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -170,6 +246,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp); + + tmp = mmGCVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp); } static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev) @@ -203,6 +283,7 @@ static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; int i; uint32_t tmp; @@ -231,43 +312,37 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !amdgpu_noretry); - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i, tmp); - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + !adev->gmc.noretry); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); } } static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; unsigned i; for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - 2 * i, 0xffffffff); + i * hub->eng_addr_distance, 0xffffffff); WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - 2 * i, 0x1f); + i * hub->eng_addr_distance, 0x1f); } } -int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) +static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
*/ gfxhub_v2_0_init_gart_aperture_regs(adev); gfxhub_v2_0_init_system_aperture_regs(adev); @@ -282,14 +357,16 @@ int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) return 0; } -void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) +static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; u32 tmp; u32 i; /* Disable all tables */ for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); /* Setup TLB control */ tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL); @@ -298,9 +375,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 0); WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp); - /* Setup L2 cache */ - WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); + if (!amdgpu_sriov_vf(adev)) { + /* Setup L2 cache */ + WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); + } } /** @@ -309,7 +388,7 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, +static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; @@ -346,7 +425,12 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void gfxhub_v2_0_init(struct amdgpu_device *adev) +static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = { + .print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status, + .get_invalidate_req = gfxhub_v2_0_get_invalidate_req, +}; + +static void gfxhub_v2_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; @@ -368,4 +452,32 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS); hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ - + mmGCVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs; } + +const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = { + .get_fb_location = gfxhub_v2_0_get_fb_location, + .get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset, + .setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs, + .gart_enable = gfxhub_v2_0_gart_enable, + .gart_disable = gfxhub_v2_0_gart_disable, + .set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default, + .init = gfxhub_v2_0_init, +}; diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h index 392b8cd94fc0..9ddc35cd53d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h @@ -24,14 +24,6 @@ #ifndef __GFXHUB_V2_0_H__ #define __GFXHUB_V2_0_H__ -u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev); -int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev); -void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev); -void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void gfxhub_v2_0_init(struct amdgpu_device *adev); -u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev); -void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); +extern const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c new file mode 100644 index 000000000000..e80d1dc43079 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -0,0 +1,583 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "gfxhub_v2_1.h" + +#include "gc/gc_10_3_0_offset.h" +#include "gc/gc_10_3_0_sh_mask.h" +#include "gc/gc_10_3_0_default.h" +#include "navi10_enum.h" + +#include "soc15_common.h" + +#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP 0x16f8 +#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX 0 + +static const char *gfxhub_client_ids[] = { + "CB/DB", + "Reserved", + "GE1", + "GE2", + "CPF", + "CPC", + "CPG", + "RLC", + "TCP", + "SQC (inst)", + "SQC (data)", + "SQG", + "Reserved", + "SDMA0", + "SDMA1", + "GCR", + "SDMA2", + "SDMA3", +}; + +static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + u32 cid = REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, CID); + + dev_err(adev->dev, + "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + cid >= ARRAY_SIZE(gfxhub_client_ids) ? 
"unknown" : gfxhub_client_ids[cid], + cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%lx\n", + REG_GET_FIELD(status, + GCVM_L2_PROTECTION_FAULT_STATUS, RW)); +} + +static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev) +{ + u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE); + + base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + return base; +} + +static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev) +{ + return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24; +} + +static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); +} + +static void gfxhub_v2_1_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v2_1_setup_vm_pt_regs(adev, 0, pt_base); + + WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); +} + +static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + + /* Program the AGP BAR */ + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". 
*/ + WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); +} + + +static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC); /* UC, uncached */ + + WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, + ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, + L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp); + + tmp = mmGCVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp); + + tmp = mmGCVM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp); + + tmp = mmGCVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp); +} + +static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp); +} + +static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev) +{ + /* These registers are not accessible to 
VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0xFFFFFFFF); + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + 0); + + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); + +} + +static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + int i; + uint32_t tmp; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + adev->vm_manager.num_level); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + unsigned i; + + for (i = 0 ; i < 18; ++i) { + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) { + /* + * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are + * VF copy registers, so vbios post doesn't program them; the + * SRIOV driver needs to program them + */ + WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } + + /* GART Enable.
*/ + gfxhub_v2_1_init_gart_aperture_regs(adev); + gfxhub_v2_1_init_system_aperture_regs(adev); + gfxhub_v2_1_init_tlb_regs(adev); + gfxhub_v2_1_init_cache_regs(adev); + + gfxhub_v2_1_enable_system_domain(adev); + gfxhub_v2_1_disable_identity_aperture(adev); + gfxhub_v2_1_setup_vmid_config(adev); + gfxhub_v2_1_program_invalidation(adev); + + return 0; +} + +static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp); + + /* Setup L2 cache */ + WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0); +} + +/** + * gfxhub_v2_1_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = { + .print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status, + .get_invalidate_req = gfxhub_v2_1_get_invalidate_req, +}; + +static void gfxhub_v2_1_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(GC, 0, + mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(GC, 0, + mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + 
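+ /* The per-hub offsets and strides cached here let common GMC code + * address any context or invalidation engine on this hub generically; + * e.g. VMID n's page-table base register is + * ctx0_ptb_addr_lo32 + n * ctx_addr_distance. */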
hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ - + mmGCVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs; +} + +static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev) +{ + u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL); + u32 max_region = + REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); + u32 max_num_physical_nodes = 0; + u32 max_physical_node_id = 0; + + switch (adev->ip_versions[XGMI_HWIP][0]) { + case IP_VERSION(4, 8, 0): + max_num_physical_nodes = 4; + max_physical_node_id = 3; + break; + default: + return -EINVAL; + } + + /* PF_MAX_REGION=0 means xgmi is disabled */ + if (max_region) { + adev->gmc.xgmi.num_physical_nodes = max_region + 1; + if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) + return -EINVAL; + + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); + if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) + return -EINVAL; + + adev->gmc.xgmi.node_segment_size = REG_GET_FIELD( + RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_SIZE), + GCMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + } + + return 0; +} + +static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) +{ + int i; + u32 tmp = 0, disabled_sa = 0; + u32 efuse_setting, vbios_setting; + + u32 max_sa_mask = amdgpu_gfx_create_bitmask( + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines); + + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) { + /* Get SA disabled bitmap from eFuse setting */ + efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); + efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; + efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + /* Get SA disabled bitmap from VBIOS setting */ + vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE); + vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK; + vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + disabled_sa |= efuse_setting | vbios_setting; + /* Make sure not to report harvested SAs beyond the max SA count */ + disabled_sa &= max_sa_mask; + + for (i = 0; disabled_sa > 0; i++) { + if (disabled_sa & 1) + tmp |= 0x3 << (i * 2); + disabled_sa >>= 1; + } 
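+ /* For illustration: each disabled SA selects a two-bit group above, + * e.g. disabled_sa = 0x5 (SA0 and SA2 disabled) expands to tmp = 0x33. */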
+ disabled_sa = tmp; + + WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa); + } +} + +const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = { + .get_fb_location = gfxhub_v2_1_get_fb_location, + .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset, + .setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs, + .gart_enable = gfxhub_v2_1_gart_enable, + .gart_disable = gfxhub_v2_1_gart_disable, + .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default, + .init = gfxhub_v2_1_init, + .get_xgmi_info = gfxhub_v2_1_get_xgmi_info, + .utcl2_harvest = gfxhub_v2_1_utcl2_harvest, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h index ed422012c8c6..f75c2eccfad9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,10 +21,9 @@ * */ -#ifndef __DCE_VIRTUAL_H__ -#define __DCE_VIRTUAL_H__ +#ifndef __GFXHUB_V2_1_H__ +#define __GFXHUB_V2_1_H__ -extern const struct amdgpu_ip_block_version dce_virtual_ip_block; +extern const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs; #endif - diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index f5725336a5f2..3ec5ff5a6dbe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -25,11 +25,10 @@ #include "amdgpu.h" #include "amdgpu_atomfirmware.h" #include "gmc_v10_0.h" +#include "umc_v8_7.h" -#include "hdp/hdp_5_0_0_offset.h" -#include "hdp/hdp_5_0_0_sh_mask.h" -#include "gc/gc_10_1_0_sh_mask.h" -#include "mmhub/mmhub_2_0_0_sh_mask.h" +#include "athub/athub_2_0_0_sh_mask.h" +#include "athub/athub_2_0_0_offset.h" #include "dcn/dcn_2_0_0_offset.h" #include "dcn/dcn_2_0_0_sh_mask.h" #include "oss/osssys_5_0_0_offset.h" @@ -37,15 +36,17 @@ #include "navi10_enum.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "nbio_v2_3.h" #include "gfxhub_v2_0.h" +#include "gfxhub_v2_1.h" #include "mmhub_v2_0.h" +#include "mmhub_v2_3.h" #include "athub_v2_0.h" -/* XXX Move this macro to navi10 header file, which is like vid.h for VI.*/ -#define AMDGPU_NUM_OF_VMIDS 8 +#include "athub_v2_1.h" #if 0 static const struct soc15_reg_golden golden_settings_navi10_hdp[] = @@ -54,68 +55,31 @@ static const struct soc15_reg_golden golden_settings_navi10_hdp[] = }; #endif +static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, enum amdgpu_interrupt_state state) { - struct amdgpu_vmhub *hub; - u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i; - - bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; - - bits[AMDGPU_MMHUB_0] = 
MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; - switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB_0]; - for (i = 0; i < 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp &= ~bits[AMDGPU_MMHUB_0]; - WREG32(reg, tmp); - } - + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; - for (i = 0; i < 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp &= ~bits[AMDGPU_GFXHUB_0]; - WREG32(reg, tmp); - } + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB_0]; - for (i = 0; i < 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp |= bits[AMDGPU_MMHUB_0]; - WREG32(reg, tmp); - } - + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; - for (i = 0; i < 16; i++) { - reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); - tmp |= bits[AMDGPU_GFXHUB_0]; - WREG32(reg, tmp); - } + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); break; default: break; @@ -128,62 +92,74 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + bool retry_fault = !!(entry->src_data[1] & 0x80); + bool write_fault = !!(entry->src_data[1] & 0x20); struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; + struct amdgpu_task_info task_info; uint32_t status = 0; u64 addr; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if (entry->vmid_src == AMDGPU_GFXHUB_0) + if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); } - if (printk_ratelimit()) { - struct amdgpu_task_info task_info; - - memset(&task_info, 0, sizeof(struct amdgpu_task_info)); - amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); - - dev_err(adev->dev, - "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " - "for process %s pid %d thread %s pid %d)\n", - entry->vmid_src ? 
"mmhub" : "gfxhub", - entry->src_id, entry->ring_id, entry->vmid, - entry->pasid, task_info.process_name, task_info.tgid, - task_info.task_name, task_info.pid); - dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", - addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) { - dev_err(adev->dev, - "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", - status); - dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", - REG_GET_FIELD(status, - GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); - dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", - REG_GET_FIELD(status, - GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); - dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", - REG_GET_FIELD(status, - GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); - dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", - REG_GET_FIELD(status, - GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); - dev_err(adev->dev, "\t RW: 0x%lx\n", - REG_GET_FIELD(status, - GCVM_L2_PROTECTION_FAULT_STATUS, RW)); - } - } + if (!printk_ratelimit()) + return 0; + + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); + amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); + + dev_err(adev->dev, + "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " + "for process %s pid %d thread %s pid %d)\n", + entry->vmid_src ? "mmhub" : "gfxhub", + entry->src_id, entry->ring_id, entry->vmid, + entry->pasid, task_info.process_name, task_info.tgid, + task_info.task_name, task_info.pid); + dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n", + addr, entry->client_id, + soc15_ih_clientid_name[entry->client_id]); + + if (!amdgpu_sriov_vf(adev)) + hub->vmhub_funcs->print_l2_protection_fault_status(adev, + status); return 0; } @@ -193,30 +169,20 @@ static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = { .process = gmc_v10_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = { + .set = gmc_v10_0_ecc_interrupt_state, + .process = amdgpu_umc_process_ecc_irq, +}; + static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) { adev->gmc.vm_fault.num_types = 1; adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs; -} - -static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, - uint32_t flush_type) -{ - u32 req = 0; - - /* invalidate using legacy mode on vmid*/ - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vmid); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); - req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); - return req; + if (!amdgpu_sriov_vf(adev)) { + adev->gmc.ecc_irq.num_types = 1; + adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs; + } } /** @@ -234,6 +200,19 @@ static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, (!amdgpu_sriov_vf(adev))); } +static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info( + struct amdgpu_device *adev, + uint8_t vmid, uint16_t *p_pasid) +{ + uint32_t value; + + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; + + return !!(value & 
ATC_VMID0_PASID_MAPPING__VALID_MASK); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -246,10 +225,15 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); + u32 tmp; /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + unsigned char hub_ip = 0; + + hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -263,7 +247,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acquire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, hub_ip); + if (tmp & 0x1) break; udelay(1); @@ -273,18 +259,24 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng, + inv_req, hub_ip); /* * Issue a dummy read to wait for the ACK register to be cleared * to avoid a false ACK due to the new fast GRBM interface. */ - if (vmhub == AMDGPU_GFXHUB_0) - RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); + if ((vmhub == AMDGPU_GFXHUB_0) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) + RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng, hub_ip); + tmp &= 1 << vmid; if (tmp) break; @@ -298,14 +290,15 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0, hub_ip); spin_unlock(&adev->gmc.invalidate_lock); if (i < adev->usec_timeout) return; - DRM_ERROR("Timeout waiting for VM flush ACK!\n"); + DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub); } /** @@ -313,6 +306,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * * @adev: amdgpu_device pointer * @vmid: vm instance to flush + * @vmhub: vmhub type + * @flush_type: the flush type * * Flush the TLB for the requested page table. 
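+ * + * Depending on device state the flush is serviced via the KIQ, by direct + * MMIO access (gmc_v10_0_flush_vm_hub above), or from the SDMA to work + * around the Navi invalidation erratum described below.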
*/ @@ -326,7 +321,26 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int r; /* flush hdp cache */ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); + + /* For SRIOV run time, the driver shouldn't access the register through + * MMIO. Directly use kiq to do the vm invalidation instead. + */ + if (adev->gfx.kiq.ring.sched.ready && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && + down_read_trylock(&adev->reset_sem)) { + struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; + const unsigned eng = 17; + u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); + u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; + u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; + + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid); + + up_read(&adev->reset_sem); + return; + } mutex_lock(&adev->mman.gtt_window_lock); @@ -340,7 +354,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || - adev->in_gpu_reset || + amdgpu_in_reset(adev) || ring->sched.ready == false) { gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); mutex_unlock(&adev->mman.gtt_window_lock); return; @@ -352,7 +366,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * translation. Avoid this by doing the invalidation from the SDMA * itself. */ - r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job); + r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, + &job); if (r) goto error_alloc; @@ -380,12 +395,78 @@ error_alloc: DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); } +/** + * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * @flush_type: the flush type + * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB() + * + * Flush the TLB for the requested pasid. 
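+ * + * Two paths are used here: when the KIQ ring is ready the flush is done + * with an invalidate-TLBs packet and a polled fence; otherwise the ATC + * VMID/PASID mappings are walked and every VMID bound to @pasid is + * flushed directly.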
+ */ +static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid, i; + signed long r; + uint32_t seq; + uint16_t queried_pasid; + bool ret; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + if (amdgpu_emu_mode == 0 && ring->sched.ready) { + spin_lock(&adev->gfx.kiq.ring_lock); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, flush_type, all_hub); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) { + amdgpu_ring_undo(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + return -ETIME; + } + + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); + return -ETIME; + } + + return 0; + } + + for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { + + ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + if (all_hub) { + for (i = 0; i < adev->num_vmhubs; i++) + gmc_v10_0_flush_gpu_tlb(adev, vmid, + i, flush_type); + } else { + gmc_v10_0_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, flush_type); + } + break; + } + } + + return 0; +} + static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); + uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; /* @@ -399,16 +480,21 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, if (use_semaphore) /* a read return value of 1 means semaphore acquire */ amdgpu_ring_emit_reg_wait(ring, - hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0x1, 0x1); - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), lower_32_bits(pd_addr)); - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), upper_32_bits(pd_addr)); - amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, - hub->vm_inv_eng0_ack + eng, + amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + + hub->eng_distance * eng, + hub->vm_inv_eng0_ack + + hub->eng_distance * eng, req, 1 << vmid); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
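+ * The emit variant above performs the same acquire/invalidate/ack/release + * sequence as the MMIO path, but as ring packets, so the flush is ordered + * with the submission that needs it.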
*/ @@ -417,7 +503,8 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, * add semaphore release after invalidation, * write with 0 means semaphore release */ - amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0); return pd_addr; } @@ -439,7 +526,8 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid /* * PTE format on NAVI 10: * 63:59 reserved - * 58:57 reserved + * 58 reserved and for sienna_cichlid is used for MALL noalloc + * 57 reserved * 56 F * 55 L * 54 reserved @@ -490,8 +578,7 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) - *addr = adev->vm_manager.vram_base_offset + *addr - - adev->gmc.vram_start; + *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); BUG_ON(*addr & 0xFFFF00000000003FULL); if (!adev->gmc.translate_further) @@ -529,13 +616,37 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, } } +static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + u32 pitch; + + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * + 4); + } + + return size; +} + static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, .map_mtype = gmc_v10_0_map_mtype, .get_vm_pde = gmc_v10_0_get_vm_pde, - .get_vm_pte = gmc_v10_0_get_vm_pte + .get_vm_pte = gmc_v10_0_get_vm_pte, + .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size, }; static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -544,12 +655,63 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs; } +static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(8, 7, 0): + adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA; + adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0]; + adev->umc.ras_funcs = &umc_v8_7_ras_funcs; + break; + default: + break; + } +} + + +static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 4, 0): + adev->mmhub.funcs = &mmhub_v2_3_funcs; + break; + default: + adev->mmhub.funcs = &mmhub_v2_0_funcs; + break; + } +} + +static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + adev->gfxhub.funcs = &gfxhub_v2_1_funcs; + 
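+ /* GC 10.3.x parts take the v2.1 hub, which adds the XGMI query and + * UTCL2 harvesting hooks implemented above. */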
break; + default: + adev->gfxhub.funcs = &gfxhub_v2_0_funcs; + break; + } +} + + static int gmc_v10_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v10_0_set_mmhub_funcs(adev); + gmc_v10_0_set_gfxhub_funcs(adev); gmc_v10_0_set_gmc_funcs(adev); gmc_v10_0_set_irq_funcs(adev); + gmc_v10_0_set_umc_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -564,22 +726,15 @@ static int gmc_v10_0_early_init(void *handle) static int gmc_v10_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; - unsigned i; - - for(i = 0; i < adev->num_rings; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - unsigned vmhub = ring->funcs->vmhub; + int r; - ring->vm_inv_eng = vm_inv_eng[vmhub]++; - dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", - ring->idx, ring->name, ring->vm_inv_eng, - ring->funcs->vmhub); - } + r = amdgpu_gmc_allocate_vm_inv_eng(adev); + if (r) + return r; - /* Engine 17 is used for GART flushes */ - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + r = amdgpu_gmc_ras_late_init(adev); + if (r) + return r; return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); } @@ -589,13 +744,21 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, { u64 base = 0; - base = gfxhub_v2_0_get_fb_location(adev); + base = adev->gfxhub.funcs->get_fb_location(adev); + + /* add the xgmi offset of the physical node */ + base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ - adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev); + adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); + + /* add the xgmi offset of the physical node */ + adev->vm_manager.vram_base_offset += + adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; } /** @@ -609,31 +772,37 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) { - /* Could aper size report 0 ? 
*/ - adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); - adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); + int r; /* size in MB on si */ adev->gmc.mc_vram_size = adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; - adev->gmc.visible_vram_size = adev->gmc.aper_size; + + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_device_resize_fb_bar(adev); + if (r) + return r; + } + adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); + adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); + +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) { + adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); + adev->gmc.aper_size = adev->gmc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ + adev->gmc.visible_vram_size = adev->gmc.aper_size; if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ - if (amdgpu_gart_size == -1) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - default: - adev->gmc.gart_size = 512ULL << 20; - break; - } - } else + if (amdgpu_gart_size == -1) + adev->gmc.gart_size = 512ULL << 20; + else adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; gmc_v10_0_vram_gtt_location(adev, &adev->gmc); @@ -662,59 +831,43 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } -static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) -{ - u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; - - if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ - } else { - u32 viewport; - u32 pitch; - - viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); - pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH); - size = (REG_GET_FIELD(viewport, - HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * - REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * - 4); - } - /* return 0 if the pre-OS buffer uses up most of vram */ - if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) { - DRM_ERROR("Warning: pre-OS buffer uses most of vram, \ - be aware of gart table overwrite\n"); - return 0; - } - - return size; -} - - - static int gmc_v10_0_sw_init(void *handle) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfxhub_v2_0_init(adev); - mmhub_v2_0_init(adev); + adev->gfxhub.funcs->init(adev); + + adev->mmhub.funcs->init(adev); spin_lock_init(&adev->gmc.invalidate_lock); - r = amdgpu_atomfirmware_get_vram_info(adev, - &vram_width, &vram_type, &vram_vendor); - if (!amdgpu_emu_mode) - adev->gmc.vram_width = vram_width; - else + if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) { + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; + adev->gmc.vram_width = 64; + } else if (amdgpu_emu_mode == 1) { + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ + } else { + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + adev->gmc.vram_width = vram_width; - adev->gmc.vram_type = vram_type; - adev->gmc.vram_vendor = vram_vendor; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; + } + + 
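+ /* Emulation has no atomfirmware VRAM table to query, hence the + * hard-coded DDR4 (APU) and GDDR6 defaults above. */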
switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->num_vmhubs = 2; /* * To fulfill 4-level page support, @@ -731,38 +884,48 @@ static int gmc_v10_0_sw_init(void *handle) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, &adev->gmc.vm_fault); + + if (r) + return r; + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, &adev->gmc.vm_fault); if (r) return r; + if (!amdgpu_sriov_vf(adev)) { + /* interrupt sent to DF. */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, + &adev->gmc.ecc_irq); + if (r) + return r; + } + /* * Set the internal MC address mask This is the max address of the GPU's * internal address space. */ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ - /* - * Reserve 8M stolen memory for navi10 like vega10 - * TODO: will check if it's really needed on asic. - */ - if (amdgpu_emu_mode == 1) - adev->gmc.stolen_size = 0; - else - adev->gmc.stolen_size = 9 * 1024 *1024; - r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); if (r) { printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); return r; } + if (adev->gmc.xgmi.supported) { + r = adev->gfxhub.funcs->get_xgmi_info(adev); + if (r) + return r; + } + r = gmc_v10_0_mc_init(adev); if (r) return r; - adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev); + amdgpu_gmc_get_vbios_allocations(adev); + amdgpu_gmc_get_reserved_allocation(adev); /* Memory manager */ r = amdgpu_bo_init(adev); @@ -779,8 +942,7 @@ static int gmc_v10_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.first_kfd_vmid = 8; amdgpu_vm_manager_init(adev); @@ -788,7 +950,7 @@ static int gmc_v10_0_sw_init(void *handle) } /** - * gmc_v8_0_gart_fini - vm fini callback + * gmc_v10_0_gart_fini - vm fini callback * * @adev: amdgpu_device pointer * @@ -797,7 +959,6 @@ static int gmc_v10_0_sw_init(void *handle) static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) { amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); } static int gmc_v10_0_sw_fini(void *handle) @@ -814,14 +975,6 @@ static int gmc_v10_0_sw_fini(void *handle) static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - break; - default: - break; - } } /** @@ -833,7 +986,6 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) { int r; bool value; - u32 tmp; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -844,29 +996,24 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - r = gfxhub_v2_0_gart_enable(adev); + r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; - r = mmhub_v2_0_gart_enable(adev); + r = adev->mmhub.funcs->gart_enable(adev); if (r) return r; - tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); - tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; - WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); - - tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); - WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); + 
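+ /* HDP cache behaviour is programmed through the per-ASIC hdp callbacks + * here instead of direct HDP_MISC_CNTL/HDP_HOST_PATH_CNTL writes. */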
adev->hdp.funcs->init_registers(adev); /* Flush HDP after it is initialized */ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; - gfxhub_v2_0_set_fault_enable_default(adev, value); - mmhub_v2_0_set_fault_enable_default(adev, value); + adev->gfxhub.funcs->set_fault_enable_default(adev, value); + adev->mmhub.funcs->set_fault_enable_default(adev, value); gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); @@ -887,10 +1034,20 @@ static int gmc_v10_0_hw_init(void *handle) /* The sequence of these two function calls matters.*/ gmc_v10_0_init_golden_registers(adev); + /* + * harvestable groups in gc_utcl2 need to be programmed before any GFX block + * register setup within GMC, or else system hang when harvesting SA. + */ + if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) + adev->gfxhub.funcs->utcl2_harvest(adev); + r = gmc_v10_0_gart_enable(adev); if (r) return r; + if (adev->umc.funcs && adev->umc.funcs->init_registers) + adev->umc.funcs->init_registers(adev); + return 0; } @@ -903,8 +1060,8 @@ static int gmc_v10_0_hw_init(void *handle) */ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) { - gfxhub_v2_0_gart_disable(adev); - mmhub_v2_0_gart_disable(adev); + adev->gfxhub.funcs->gart_disable(adev); + adev->mmhub.funcs->gart_disable(adev); amdgpu_gart_table_vram_unpin(adev); } @@ -912,14 +1069,16 @@ static int gmc_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v10_0_gart_disable(adev); + if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); return 0; } + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - gmc_v10_0_gart_disable(adev); return 0; } @@ -970,20 +1129,26 @@ static int gmc_v10_0_set_clockgating_state(void *handle, int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = mmhub_v2_0_set_clockgating(adev, state); + r = adev->mmhub.funcs->set_clockgating(adev, state); if (r) return r; - return athub_v2_0_set_clockgating(adev, state); + if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) + return athub_v2_1_set_clockgating(adev, state); + else + return athub_v2_0_set_clockgating(adev, state); } static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - mmhub_v2_0_get_clockgating(adev, flags); + adev->mmhub.funcs->get_clockgating(adev, flags); - athub_v2_0_get_clockgating(adev, flags); + if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) + athub_v2_1_get_clockgating(adev, flags); + else + athub_v2_0_get_clockgating(adev, flags); } static int gmc_v10_0_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b205039350b6..0fe714f54cca 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin"); #define MC_SEQ_MISC0__MT__HBM 0x60000000 #define MC_SEQ_MISC0__MT__DDR3 0xB0000000 - -static const u32 crtc_offsets[6] = -{ - SI_CRTC0_REGISTER_OFFSET, - SI_CRTC1_REGISTER_OFFSET, - SI_CRTC2_REGISTER_OFFSET, - SI_CRTC3_REGISTER_OFFSET, - SI_CRTC4_REGISTER_OFFSET, - SI_CRTC5_REGISTER_OFFSET -}; - static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 
blackout; @@ -357,6 +346,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; } + adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; gmc_v6_0_vram_gtt_location(adev, &adev->gmc); return 0; @@ -541,7 +531,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) * the VMs are determined by the application and setup and assigned * on the fly in the vm part of radeon_gart.c */ - for (i = 1; i < 16; i++) { + for (i = 1; i < AMDGPU_NUM_VMID; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, table_addr >> 12); @@ -802,8 +792,6 @@ static int gmc_v6_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_bo_late_init(adev); - if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else @@ -816,16 +804,13 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) unsigned size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); } - /* return 0 if the pre-OS buffer uses up most of vram */ - if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) - return 0; return size; } @@ -856,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle) adev->gmc.mc_mask = 0xffffffffffULL; - r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); + r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { - dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); + dev_warn(adev->dev, "No suitable DMA available.\n"); return r; } - adev->need_swiotlb = drm_need_swiotlb(44); + adev->need_swiotlb = drm_need_swiotlb(40); r = gmc_v6_0_init_microcode(adev); if (r) { @@ -873,7 +858,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev); + amdgpu_gmc_get_vbios_allocations(adev); r = amdgpu_bo_init(adev); if (r) @@ -889,7 +874,7 @@ static int gmc_v6_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.first_kfd_vmid = 8; amdgpu_vm_manager_init(adev); /* base offset of vram pages */ @@ -913,7 +898,6 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; @@ -1147,6 +1131,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { .set_prt = gmc_v6_0_set_prt, .get_vm_pde = gmc_v6_0_get_vm_pde, .get_vm_pte = gmc_v6_0_get_vm_pte, + .get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size, }; static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index f08e5330642d..0a50fdaced7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU && + adev->gmc.real_vram_size > adev->gmc.aper_size) { adev->gmc.aper_base = 
((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } @@ -413,11 +414,46 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; } + adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; gmc_v7_0_vram_gtt_location(adev, &adev->gmc); return 0; } +/** + * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * @flush_type: type of flush + * @all_hub: flush all hubs + * + * Flush the TLB for the requested pasid. + */ +static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid; + unsigned int tmp; + + if (amdgpu_in_reset(adev)) + return -EIO; + + for (vmid = 1; vmid < 16; vmid++) { + + tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); + RREG32(mmVM_INVALIDATE_RESPONSE); + break; + } + } + + return 0; +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -430,7 +466,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * @vmid: vm instance to flush - * + * @vmhub: which hub to flush + * @flush_type: type of flush + * * * Flush the TLB for the requested page table (CIK). */ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, @@ -478,7 +516,7 @@ static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, } /** - * gmc_v8_0_set_fault_enable_default - update VM fault handling + * gmc_v7_0_set_fault_enable_default - update VM fault handling * * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page @@ -640,7 +678,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) /* set vm size, must be a multiple of 4 */ WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); - for (i = 1; i < 16; i++) { + for (i = 1; i < AMDGPU_NUM_VMID; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, table_addr >> 12); @@ -729,6 +767,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value + * @pasid: debug logging only - no functional use + * * Print human readable fault information (CIK). 
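+ * + * All four values are captured by the interrupt handler from the fault + * registers and passed through here purely for logging.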
*/ @@ -922,8 +962,6 @@ static int gmc_v7_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_bo_late_init(adev); - if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else @@ -936,16 +974,14 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) unsigned size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); } - /* return 0 if the pre-OS buffer uses up most of vram */ - if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) - return 0; + return size; } @@ -986,7 +1022,7 @@ static int gmc_v7_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { - pr_warn("amdgpu: No suitable DMA available\n"); + pr_warn("No suitable DMA available\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(40); @@ -1001,7 +1037,7 @@ static int gmc_v7_0_sw_init(void *handle) if (r) return r; - adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev); + amdgpu_gmc_get_vbios_allocations(adev); /* Memory manager */ r = amdgpu_bo_init(adev); @@ -1018,7 +1054,7 @@ static int gmc_v7_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.first_kfd_vmid = 8; amdgpu_vm_manager_init(adev); /* base offset of vram pages */ @@ -1049,7 +1085,6 @@ static int gmc_v7_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; @@ -1333,11 +1368,13 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, .set_prt = gmc_v7_0_set_prt, .get_vm_pde = gmc_v7_0_get_vm_pde, - .get_vm_pte = gmc_v7_0_get_vm_pte + .get_vm_pte = gmc_v7_0_get_vm_pte, + .get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size, }; static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 6d96d40fbcb8..492ebed2915b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); @@ -230,39 +231,29 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) chip_name = "tonga"; break; case CHIP_POLARIS11: - if (((adev->pdev->device == 0x67ef) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe5))) || - ((adev->pdev->device == 0x67ff) && - ((adev->pdev->revision == 0xcf) || - (adev->pdev->revision == 0xef) || - (adev->pdev->revision == 0xff)))) - chip_name = "polaris11_k"; - 
else if ((adev->pdev->device == 0x67ef) && - (adev->pdev->revision == 0xe2)) + if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || + ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) chip_name = "polaris11_k"; else chip_name = "polaris11"; break; case CHIP_POLARIS10: - if ((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe1) || - (adev->pdev->revision == 0xf7))) + if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) chip_name = "polaris10_k"; else chip_name = "polaris10"; break; case CHIP_POLARIS12: - if (((adev->pdev->device == 0x6987) && - ((adev->pdev->revision == 0xc0) || - (adev->pdev->revision == 0xc3))) || - ((adev->pdev->device == 0x6981) && - ((adev->pdev->revision == 0x00) || - (adev->pdev->revision == 0x01) || - (adev->pdev->revision == 0x10)))) + if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) { chip_name = "polaris12_k"; - else - chip_name = "polaris12"; + } else { + WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159); + /* Polaris12 32bit ASIC needs a special MC firmware */ + if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40) + chip_name = "polaris12_32"; + else + chip_name = "polaris12"; + } break; case CHIP_FIJI: case CHIP_CARRIZO: @@ -615,11 +606,47 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; } + adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; gmc_v8_0_vram_gtt_location(adev, &adev->gmc); return 0; } +/** + * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * @flush_type: type of flush + * @all_hub: flush all hubs + * + * Flush the TLB for the requested pasid. + */ +static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid; + unsigned int tmp; + + if (amdgpu_in_reset(adev)) + return -EIO; + + for (vmid = 1; vmid < 16; vmid++) { + + tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && + (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { + WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); + RREG32(mmVM_INVALIDATE_RESPONSE); + break; + } + } + + return 0; + +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -632,6 +659,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * * @adev: amdgpu_device pointer * @vmid: vm instance to flush + * @vmhub: which hub to flush + * @flush_type: type of flush * * Flush the TLB for the requested page table (VI). */ @@ -882,7 +911,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) /* set vm size, must be a multiple of 4 */ WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); - for (i = 1; i < 16; i++) { + for (i = 1; i < AMDGPU_NUM_VMID; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, table_addr >> 12); @@ -972,6 +1001,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value + * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value + * @pasid: debug logging only - no functional use * * Print human readable fault information (VI). 
*/ @@ -1039,8 +1070,6 @@ static int gmc_v8_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_bo_late_init(adev); - if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else @@ -1053,16 +1082,14 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) unsigned size; if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { u32 viewport = RREG32(mmVIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 4); } - /* return 0 if the pre-OS buffer uses up most of vram */ - if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) - return 0; + return size; } @@ -1111,7 +1138,7 @@ static int gmc_v8_0_sw_init(void *handle) r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { - pr_warn("amdgpu: No suitable DMA available\n"); + pr_warn("No suitable DMA available\n"); return r; } adev->need_swiotlb = drm_need_swiotlb(40); @@ -1126,7 +1153,7 @@ static int gmc_v8_0_sw_init(void *handle) if (r) return r; - adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev); + amdgpu_gmc_get_vbios_allocations(adev); /* Memory manager */ r = amdgpu_bo_init(adev); @@ -1143,7 +1170,7 @@ static int gmc_v8_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.first_kfd_vmid = 8; amdgpu_vm_manager_init(adev); /* base offset of vram pages */ @@ -1174,7 +1201,6 @@ static int gmc_v8_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; @@ -1700,11 +1726,13 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, .set_prt = gmc_v8_0_set_prt, .get_vm_pde = gmc_v8_0_get_vm_pde, - .get_vm_pte = gmc_v8_0_get_vm_pte + .get_vm_pte = gmc_v8_0_get_vm_pte, + .get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size, }; static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a5b68b5e452f..cb82404df534 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -31,17 +31,17 @@ #include "amdgpu_atomfirmware.h" #include "amdgpu_gem.h" -#include "hdp/hdp_4_0_offset.h" -#include "hdp/hdp_4_0_sh_mask.h" #include "gc/gc_9_0_sh_mask.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" #include "vega10_enum.h" #include "mmhub/mmhub_1_0_offset.h" +#include "athub/athub_1_0_sh_mask.h" #include "athub/athub_1_0_offset.h" #include "oss/osssys_4_0_offset.h" #include "soc15.h" +#include "soc15d.h" #include "soc15_common.h" #include "umc/umc_6_0_sh_mask.h" @@ -50,8 +50,12 @@ #include "athub_v1_0.h" #include "gfxhub_v1_1.h" #include "mmhub_v9_4.h" +#include "mmhub_v1_7.h" #include "umc_v6_1.h" #include "umc_v6_0.h" +#include "umc_v6_7.h" +#include "hdp_v4_0.h" +#include "mca_v3_0.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" @@ -65,22 +69,259 
@@ #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 + + +static const char *gfxhub_client_ids[] = { + "CB", + "DB", + "IA", + "WD", + "CPF", + "CPC", + "CPG", + "RLC", + "TCP", + "SQC (inst)", + "SQC (data)", + "SQG", + "PA", +}; -/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/ -#define AMDGPU_NUM_OF_VMIDS 8 +static const char *mmhub_client_ids_raven[][2] = { + [0][0] = "MP1", + [1][0] = "MP0", + [2][0] = "VCN", + [3][0] = "VCNU", + [4][0] = "HDP", + [5][0] = "DCE", + [13][0] = "UTCL2", + [19][0] = "TLS", + [26][0] = "OSS", + [27][0] = "SDMA0", + [0][1] = "MP1", + [1][1] = "MP0", + [2][1] = "VCN", + [3][1] = "VCNU", + [4][1] = "HDP", + [5][1] = "XDP", + [6][1] = "DBGU0", + [7][1] = "DCE", + [8][1] = "DCEDWB0", + [9][1] = "DCEDWB1", + [26][1] = "OSS", + [27][1] = "SDMA0", +}; -static const u32 golden_settings_vega10_hdp[] = -{ - 0xf64, 0x0fffffff, 0x00000000, - 0xf65, 0x0fffffff, 0x00000000, - 0xf66, 0x0fffffff, 0x00000000, - 0xf67, 0x0fffffff, 0x00000000, - 0xf68, 0x0fffffff, 0x00000000, - 0xf6a, 0x0fffffff, 0x00000000, - 0xf6b, 0x0fffffff, 0x00000000, - 0xf6c, 0x0fffffff, 0x00000000, - 0xf6d, 0x0fffffff, 0x00000000, - 0xf6e, 0x0fffffff, 0x00000000, +static const char *mmhub_client_ids_renoir[][2] = { + [0][0] = "MP1", + [1][0] = "MP0", + [2][0] = "HDP", + [4][0] = "DCEDMC", + [5][0] = "DCEVGA", + [13][0] = "UTCL2", + [19][0] = "TLS", + [26][0] = "OSS", + [27][0] = "SDMA0", + [28][0] = "VCN", + [29][0] = "VCNU", + [30][0] = "JPEG", + [0][1] = "MP1", + [1][1] = "MP0", + [2][1] = "HDP", + [3][1] = "XDP", + [6][1] = "DBGU0", + [7][1] = "DCEDMC", + [8][1] = "DCEVGA", + [9][1] = "DCEDWB", + [26][1] = "OSS", + [27][1] = "SDMA0", + [28][1] = "VCN", + [29][1] = "VCNU", + [30][1] = "JPEG", +}; + +static const char *mmhub_client_ids_vega10[][2] = { + [0][0] = "MP0", + [1][0] = "UVD", + [2][0] = "UVDU", + [3][0] = "HDP", + [13][0] = "UTCL2", + [14][0] = "OSS", + [15][0] = "SDMA1", + [32+0][0] = "VCE0", + [32+1][0] = "VCE0U", + [32+2][0] = "XDMA", + [32+3][0] = "DCE", + [32+4][0] = "MP1", + [32+14][0] = "SDMA0", + [0][1] = "MP0", + [1][1] = "UVD", + [2][1] = "UVDU", + [3][1] = "DBGU0", + [4][1] = "HDP", + [5][1] = "XDP", + [14][1] = "OSS", + [15][1] = "SDMA0", + [32+0][1] = "VCE0", + [32+1][1] = "VCE0U", + [32+2][1] = "XDMA", + [32+3][1] = "DCE", + [32+4][1] = "DCEDWB", + [32+5][1] = "MP1", + [32+6][1] = "DBGU1", + [32+14][1] = "SDMA1", +}; + +static const char *mmhub_client_ids_vega12[][2] = { + [0][0] = "MP0", + [1][0] = "VCE0", + [2][0] = "VCE0U", + [3][0] = "HDP", + [13][0] = "UTCL2", + [14][0] = "OSS", + [15][0] = "SDMA1", + [32+0][0] = "DCE", + [32+1][0] = "XDMA", + [32+2][0] = "UVD", + [32+3][0] = "UVDU", + [32+4][0] = "MP1", + [32+15][0] = "SDMA0", + [0][1] = "MP0", + [1][1] = "VCE0", + [2][1] = "VCE0U", + [3][1] = "DBGU0", + [4][1] = "HDP", + [5][1] = "XDP", + [14][1] = "OSS", + [15][1] = "SDMA0", + [32+0][1] = "DCE", + [32+1][1] = "DCEDWB", + [32+2][1] = "XDMA", + [32+3][1] = "UVD", + [32+4][1] = "UVDU", + [32+5][1] = "MP1", + [32+6][1] = "DBGU1", + [32+15][1] = "SDMA1", +}; + +static const char *mmhub_client_ids_vega20[][2] = { + [0][0] = "XDMA", + [1][0] = "DCE", + [2][0] = "VCE0", + [3][0] = "VCE0U", + [4][0] = "UVD", + [5][0] = "UVD1U", + [13][0] = "OSS", + [14][0] = "HDP", + 
[15][0] = "SDMA0", + [32+0][0] = "UVD", + [32+1][0] = "UVDU", + [32+2][0] = "MP1", + [32+3][0] = "MP0", + [32+12][0] = "UTCL2", + [32+14][0] = "SDMA1", + [0][1] = "XDMA", + [1][1] = "DCE", + [2][1] = "DCEDWB", + [3][1] = "VCE0", + [4][1] = "VCE0U", + [5][1] = "UVD1", + [6][1] = "UVD1U", + [7][1] = "DBGU0", + [8][1] = "XDP", + [13][1] = "OSS", + [14][1] = "HDP", + [15][1] = "SDMA0", + [32+0][1] = "UVD", + [32+1][1] = "UVDU", + [32+2][1] = "DBGU1", + [32+3][1] = "MP1", + [32+4][1] = "MP0", + [32+14][1] = "SDMA1", +}; + +static const char *mmhub_client_ids_arcturus[][2] = { + [0][0] = "DBGU1", + [1][0] = "XDP", + [2][0] = "MP1", + [14][0] = "HDP", + [171][0] = "JPEG", + [172][0] = "VCN", + [173][0] = "VCNU", + [203][0] = "JPEG1", + [204][0] = "VCN1", + [205][0] = "VCN1U", + [256][0] = "SDMA0", + [257][0] = "SDMA1", + [258][0] = "SDMA2", + [259][0] = "SDMA3", + [260][0] = "SDMA4", + [261][0] = "SDMA5", + [262][0] = "SDMA6", + [263][0] = "SDMA7", + [384][0] = "OSS", + [0][1] = "DBGU1", + [1][1] = "XDP", + [2][1] = "MP1", + [14][1] = "HDP", + [171][1] = "JPEG", + [172][1] = "VCN", + [173][1] = "VCNU", + [203][1] = "JPEG1", + [204][1] = "VCN1", + [205][1] = "VCN1U", + [256][1] = "SDMA0", + [257][1] = "SDMA1", + [258][1] = "SDMA2", + [259][1] = "SDMA3", + [260][1] = "SDMA4", + [261][1] = "SDMA5", + [262][1] = "SDMA6", + [263][1] = "SDMA7", + [384][1] = "OSS", +}; + +static const char *mmhub_client_ids_aldebaran[][2] = { + [2][0] = "MP1", + [3][0] = "MP0", + [32+1][0] = "DBGU_IO0", + [32+2][0] = "DBGU_IO2", + [32+4][0] = "MPIO", + [96+11][0] = "JPEG0", + [96+12][0] = "VCN0", + [96+13][0] = "VCNU0", + [128+11][0] = "JPEG1", + [128+12][0] = "VCN1", + [128+13][0] = "VCNU1", + [160+1][0] = "XDP", + [160+14][0] = "HDP", + [256+0][0] = "SDMA0", + [256+1][0] = "SDMA1", + [256+2][0] = "SDMA2", + [256+3][0] = "SDMA3", + [256+4][0] = "SDMA4", + [384+0][0] = "OSS", + [2][1] = "MP1", + [3][1] = "MP0", + [32+1][1] = "DBGU_IO0", + [32+2][1] = "DBGU_IO2", + [32+4][1] = "MPIO", + [96+11][1] = "JPEG0", + [96+12][1] = "VCN0", + [96+13][1] = "VCNU0", + [128+11][1] = "JPEG1", + [128+12][1] = "VCN1", + [128+13][1] = "VCNU1", + [160+1][1] = "XDP", + [160+14][1] = "HDP", + [256+0][1] = "SDMA0", + [256+1][1] = "SDMA1", + [256+2][1] = "SDMA2", + [256+3][1] = "SDMA3", + [256+4][1] = "SDMA4", + [384+0][1] = "OSS", }; static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = @@ -165,41 +406,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { (0x001d43e0 + 0x00001800), }; -static const uint32_t ecc_umc_mcumc_status_addrs[] = { - (0x000143c2 + 0x00000000), - (0x000143c2 + 0x00000800), - (0x000143c2 + 0x00001000), - (0x000143c2 + 0x00001800), - (0x000543c2 + 0x00000000), - (0x000543c2 + 0x00000800), - (0x000543c2 + 0x00001000), - (0x000543c2 + 0x00001800), - (0x000943c2 + 0x00000000), - (0x000943c2 + 0x00000800), - (0x000943c2 + 0x00001000), - (0x000943c2 + 0x00001800), - (0x000d43c2 + 0x00000000), - (0x000d43c2 + 0x00000800), - (0x000d43c2 + 0x00001000), - (0x000d43c2 + 0x00001800), - (0x001143c2 + 0x00000000), - (0x001143c2 + 0x00000800), - (0x001143c2 + 0x00001000), - (0x001143c2 + 0x00001800), - (0x001543c2 + 0x00000000), - (0x001543c2 + 0x00000800), - (0x001543c2 + 0x00001000), - (0x001543c2 + 0x00001800), - (0x001943c2 + 0x00000000), - (0x001943c2 + 0x00000800), - (0x001943c2 + 0x00001000), - (0x001943c2 + 0x00001800), - (0x001d43c2 + 0x00000000), - (0x001d43c2 + 0x00000800), - (0x001d43c2 + 0x00001000), - (0x001d43c2 + 0x00001800), -}; - static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device 
*adev, struct amdgpu_irq_src *src, unsigned type, @@ -207,6 +413,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, { u32 bits, i, tmp, reg; + /* Devices newer than VEGA10/12 shall have these programming + sequences performed by PSP BL */ + if (adev->asic_type >= CHIP_VEGA20) + return 0; + bits = 0x7f; switch (state) { @@ -283,6 +494,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, WREG32(reg, tmp); } } + break; default: break; } @@ -291,89 +503,144 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, } static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *hub; bool retry_fault = !!(entry->src_data[1] & 0x80); - uint32_t status = 0; + bool write_fault = !!(entry->src_data[1] & 0x20); + uint32_t status = 0, cid = 0, rw = 0; + struct amdgpu_task_info task_info; + struct amdgpu_vmhub *hub; + const char *mmhub_cid; + const char *hub_name; u64 addr; - char hub_name[10]; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; - if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid, - entry->timestamp)) - return 1; /* This also prevents sending it to KFD */ + if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + return 1; + } + + if (!printk_ratelimit()) + return 0; if (entry->client_id == SOC15_IH_CLIENTID_VMC) { - snprintf(hub_name, sizeof(hub_name), "mmhub0"); + hub_name = "mmhub0"; hub = &adev->vmhub[AMDGPU_MMHUB_0]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { - snprintf(hub_name, sizeof(hub_name), "mmhub1"); + hub_name = "mmhub1"; hub = &adev->vmhub[AMDGPU_MMHUB_1]; } else { - snprintf(hub_name, sizeof(hub_name), "gfxhub0"); + hub_name = "gfxhub0"; hub = &adev->vmhub[AMDGPU_GFXHUB_0]; } - /* If it's the first fault for this address, process it normally */ - if (retry_fault && !in_interrupt() && - amdgpu_vm_handle_fault(adev, entry->pasid, addr)) - return 1; /* This also prevents sending it to KFD */ + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); + amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); - if (!amdgpu_sriov_vf(adev)) { - /* - * Issue a dummy read to wait for the status register to - * be updated to avoid reading an incorrect value due to - * the new fast GRBM interface. - */ - if (entry->vmid_src == AMDGPU_GFXHUB_0) - RREG32(hub->vm_l2_pro_fault_status); + dev_err(adev->dev, + "[%s] %s page fault (src_id:%u ring:%u vmid:%u " + "pasid:%u, for process %s pid %d thread %s pid %d)\n", + hub_name, retry_fault ?
"retry" : "no-retry", + entry->src_id, entry->ring_id, entry->vmid, + entry->pasid, task_info.process_name, task_info.tgid, + task_info.task_name, task_info.pid); + dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", + addr, entry->client_id, + soc15_ih_clientid_name[entry->client_id]); - status = RREG32(hub->vm_l2_pro_fault_status); - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); - } - - if (printk_ratelimit()) { - struct amdgpu_task_info task_info; - - memset(&task_info, 0, sizeof(struct amdgpu_task_info)); - amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); - - dev_err(adev->dev, - "[%s] %s page fault (src_id:%u ring:%u vmid:%u " - "pasid:%u, for process %s pid %d thread %s pid %d)\n", - hub_name, retry_fault ? "retry" : "no-retry", - entry->src_id, entry->ring_id, entry->vmid, - entry->pasid, task_info.process_name, task_info.tgid, - task_info.task_name, task_info.pid); - dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", - addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) { - dev_err(adev->dev, - "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", - status); - dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); - dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); - dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); - dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); - dev_err(adev->dev, "\t RW: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, RW)); + if (amdgpu_sriov_vf(adev)) + return 0; + /* + * Issue a dummy read to wait for the status register to + * be updated to avoid reading an incorrect value due to + * the new fast GRBM interface. + */ + if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + RREG32(hub->vm_l2_pro_fault_status); + + status = RREG32(hub->vm_l2_pro_fault_status); + cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + + dev_err(adev->dev, + "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : + gfxhub_client_ids[cid], + cid); + } else { + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 0, 0): + mmhub_cid = mmhub_client_ids_vega10[cid][rw]; + break; + case IP_VERSION(9, 3, 0): + mmhub_cid = mmhub_client_ids_vega12[cid][rw]; + break; + case IP_VERSION(9, 4, 0): + mmhub_cid = mmhub_client_ids_vega20[cid][rw]; + break; + case IP_VERSION(9, 4, 1): + mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 0): + mmhub_cid = mmhub_client_ids_raven[cid][rw]; + break; + case IP_VERSION(1, 5, 0): + case IP_VERSION(2, 4, 0): + mmhub_cid = mmhub_client_ids_renoir[cid][rw]; + break; + case IP_VERSION(9, 4, 2): + mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; } + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? 
mmhub_cid : "unknown", cid); } - + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%x\n", rw); return 0; } @@ -393,8 +660,11 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gmc.vm_fault.num_types = 1; adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; - adev->gmc.ecc_irq.num_types = 1; - adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; + if (!amdgpu_sriov_vf(adev) && + !adev->gmc.xgmi.connected_to_cpu) { + adev->gmc.ecc_irq.num_types = 1; + adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; + } } static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, @@ -426,12 +696,26 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + return false; + return ((vmhub == AMDGPU_MMHUB_0 || vmhub == AMDGPU_MMHUB_1) && (!amdgpu_sriov_vf(adev)) && - (!(adev->asic_type == CHIP_RAVEN && - adev->rev_id < 0x8 && - adev->pdev->device == 0x15d8))); + (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && + (adev->apu_flags & AMD_APU_IS_PICASSO)))); +} + +static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, + uint8_t vmid, uint16_t *p_pasid) +{ + uint32_t value; + + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; + + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } /* @@ -446,6 +730,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vmid: vm instance to flush + * @vmhub: which hub to flush * @flush_type: the flush type * * Flush the TLB for the requested page table using certain type. @@ -455,25 +740,39 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, tmp; + u32 j, inv_req, inv_req2, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); + if (adev->gmc.xgmi.num_physical_nodes && + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { + /* Vega20+XGMI caches PTEs in TC and TLB. Add a + * heavy-weight TLB flush (type 2), which flushes + * both. Due to a race condition with concurrent + * memory accesses using the same TLB cache line, we + * still need a second TLB flush after this. 
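+ * The loop below therefore submits the type-2 request + * (inv_req) first and the caller's flush_type request + * (inv_req2) second, waiting for an ack after each one.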
+ */ + inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); + inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); + } else { + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req2 = 0; + } /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal */ if (adev->gfx.kiq.ring.sched.ready && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - !adev->in_gpu_reset) { - uint32_t req = hub->vm_inv_eng0_req + eng; - uint32_t ack = hub->vm_inv_eng0_ack + eng; - - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, - 1 << vmid); + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && + down_read_trylock(&adev->reset_sem)) { + uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; + uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; + + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, + 1 << vmid); + up_read(&adev->reset_sem); return; } @@ -490,7 +789,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng); if (tmp & 0x1) break; udelay(1); @@ -500,21 +800,31 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + do { + WREG32_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng, inv_req); - /* - * Issue a dummy read to wait for the ACK register to be cleared - * to avoid a false ACK due to the new fast GRBM interface. - */ - if (vmhub == AMDGPU_GFXHUB_0) - RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng); + /* + * Issue a dummy read to wait for the ACK register to + * be cleared to avoid a false ACK due to the new fast + * GRBM interface. + */ + if ((vmhub == AMDGPU_GFXHUB_0) && + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + RREG32_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng); - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); - if (tmp & (1 << vmid)) - break; - udelay(1); - } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng); + if (tmp & (1 << vmid)) + break; + udelay(1); + } + + inv_req = inv_req2; + inv_req2 = 0; + } while (inv_req); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ if (use_semaphore) @@ -522,7 +832,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0); spin_unlock(&adev->gmc.invalidate_lock); @@ -532,6 +843,95 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for VM flush ACK!\n"); } +/** + * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid + * + * @adev: amdgpu_device pointer + * @pasid: pasid to be flushed + * @flush_type: the flush type + * @all_hub: flush all hubs + * + * Flush the TLB for the requested pasid.
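+ * + * Uses the KIQ ring when it is available; otherwise this falls + * back to walking VMIDs 1..15, matching the pasid through the ATC + * mapping registers and flushing each matching VMID (see below). + * Returns 0 on success, -EIO while in GPU reset and -ETIME on a + * KIQ timeout. A hypothetical caller, for illustration only: + * + * r = gmc_v9_0_flush_gpu_tlb_pasid(adev, pasid, 0, true); + * if (r) + * dev_warn(adev->dev, "pasid flush failed (%d)\n", r);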
+ */ +static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + int vmid, i; + signed long r; + uint32_t seq; + uint16_t queried_pasid; + bool ret; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + if (amdgpu_in_reset(adev)) + return -EIO; + + if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) { + /* Vega20+XGMI caches PTEs in TC and TLB. Add a + * heavy-weight TLB flush (type 2), which flushes + * both. Due to a race condition with concurrent + * memory accesses using the same TLB cache line, we + * still need a second TLB flush after this. + */ + bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); + /* 2 dwords flush + 8 dwords fence */ + unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; + + if (vega20_xgmi_wa) + ndw += kiq->pmf->invalidate_tlbs_size; + + spin_lock(&adev->gfx.kiq.ring_lock); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, ndw); + if (vega20_xgmi_wa) + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, 2, all_hub); + kiq->pmf->kiq_invalidate_tlbs(ring, + pasid, flush_type, all_hub); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) { + amdgpu_ring_undo(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + up_read(&adev->reset_sem); + return -ETIME; + } + + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); + up_read(&adev->reset_sem); + return -ETIME; + } + up_read(&adev->reset_sem); + return 0; + } + + for (vmid = 1; vmid < 16; vmid++) { + + ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + if (all_hub) { + for (i = 0; i < adev->num_vmhubs; i++) + gmc_v9_0_flush_gpu_tlb(adev, vmid, + i, flush_type); + } else { + gmc_v9_0_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, flush_type); + } + break; + } + } + + return 0; + +} + static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { @@ -552,16 +952,21 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, if (use_semaphore) /* a read return value of 1 means semaphore acquire */ amdgpu_ring_emit_reg_wait(ring, - hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0x1, 0x1); - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + + (hub->ctx_addr_distance * vmid), lower_32_bits(pd_addr)); - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + + (hub->ctx_addr_distance * vmid), upper_32_bits(pd_addr)); - amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, - hub->vm_inv_eng0_ack + eng, + amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + + hub->eng_distance * eng, + hub->vm_inv_eng0_ack + + hub->eng_distance * eng, req, 1 << vmid); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well.
*/ @@ -570,7 +975,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, * add semaphore release after invalidation, * write with 0 means semaphore release */ - amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0); return pd_addr; } @@ -650,8 +1056,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) - *addr = adev->vm_manager.vram_base_offset + *addr - - adev->gmc.vram_start; + *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); BUG_ON(*addr & 0xFFFF00000000003FULL); if (!adev->gmc.translate_further) @@ -685,19 +1090,58 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, *flags &= ~AMDGPU_PTE_VALID; } - if (adev->asic_type == CHIP_ARCTURUS && + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) && !(*flags & AMDGPU_PTE_SYSTEM) && mapping->bo_va->is_xgmi) *flags |= AMDGPU_PTE_SNOOPED; + + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + *flags |= mapping->flags & AMDGPU_PTE_SNOOPED; +} + +static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 1, 0): + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * + 4); + break; + default: + viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + break; + } + } + + return size; } static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, .map_mtype = gmc_v9_0_map_mtype, .get_vm_pde = gmc_v9_0_get_vm_pde, - .get_vm_pte = gmc_v9_0_get_vm_pte + .get_vm_pte = gmc_v9_0_get_vm_pte, + .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -707,17 +1151,37 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(6, 0, 0): adev->umc.funcs = &umc_v6_0_funcs; break; - case CHIP_VEGA20: + case IP_VERSION(6, 1, 1): adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; - adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; - adev->umc.funcs = &umc_v6_1_funcs; + adev->umc.ras_funcs = &umc_v6_1_ras_funcs; + break; + case IP_VERSION(6, 1, 2): + adev->umc.max_ras_err_cnt_per_query = 
UMC_V6_1_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; + adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; + adev->umc.ras_funcs = &umc_v6_1_ras_funcs; + break; + case IP_VERSION(6, 7, 0): + adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; + if (!adev->gmc.xgmi.connected_to_cpu) + adev->umc.ras_funcs = &umc_v6_7_ras_funcs; + if (1 & adev->smuio.funcs->get_die_id(adev)) + adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; + else + adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; break; default: break; @@ -726,10 +1190,55 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA20: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 4, 1): + adev->mmhub.funcs = &mmhub_v9_4_funcs; + break; + case IP_VERSION(9, 4, 2): + adev->mmhub.funcs = &mmhub_v1_7_funcs; + break; + default: adev->mmhub.funcs = &mmhub_v1_0_funcs; break; + } +} + +static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 4, 0): + adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs; + break; + case IP_VERSION(9, 4, 1): + adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs; + break; + case IP_VERSION(9, 4, 2): + adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs; + break; + default: + /* mmhub ras is not available */ + break; + } +} + +static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) +{ + adev->gfxhub.funcs = &gfxhub_v1_0_funcs; +} + +static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) +{ + adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs; +} + +static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) +{ + /* is UMC the right IP to check for MCA? Maybe DF? 
*/ + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(6, 7, 0): + if (!adev->gmc.xgmi.connected_to_cpu) + adev->mca.funcs = &mca_v3_0_funcs; + break; default: break; } @@ -739,10 +1248,25 @@ static int gmc_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ + if (adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS) + adev->gmc.xgmi.supported = true; + + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { + adev->gmc.xgmi.supported = true; + adev->gmc.xgmi.connected_to_cpu = + adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); + } + gmc_v9_0_set_gmc_funcs(adev); gmc_v9_0_set_irq_funcs(adev); gmc_v9_0_set_umc_funcs(adev); gmc_v9_0_set_mmhub_funcs(adev); + gmc_v9_0_set_mmhub_ras_funcs(adev); + gmc_v9_0_set_gfxhub_funcs(adev); + gmc_v9_0_set_hdp_ras_funcs(adev); + gmc_v9_0_set_mca_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -754,98 +1278,37 @@ static int gmc_v9_0_early_init(void *handle) return 0; } -static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) -{ - - /* - * TODO: - * Currently there is a bug where some memory client outside - * of the driver writes to first 8M of VRAM on S3 resume, - * this overrides GART which by default gets placed in first 8M and - * causes VM_FAULTS once GTT is accessed. - * Keep the stolen memory reservation until the while this is not solved. - * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init - */ - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - return true; - case CHIP_VEGA12: - case CHIP_VEGA20: - default: - return false; - } -} - -static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = - {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, - GFXHUB_FREE_VM_INV_ENGS_BITMAP}; - unsigned i; - unsigned vmhub, inv_eng; - - for (i = 0; i < adev->num_rings; ++i) { - ring = adev->rings[i]; - vmhub = ring->funcs->vmhub; - - inv_eng = ffs(vm_inv_engs[vmhub]); - if (!inv_eng) { - dev_err(adev->dev, "no VM inv eng for ring %s\n", - ring->name); - return -EINVAL; - } - - ring->vm_inv_eng = inv_eng - 1; - vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); - - dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", - ring->name, ring->vm_inv_eng, ring->funcs->vmhub); - } - - return 0; -} - static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - if (!gmc_v9_0_keep_stolen_memory(adev)) - amdgpu_bo_late_init(adev); - - r = gmc_v9_0_allocate_vm_inv_eng(adev); + r = amdgpu_gmc_allocate_vm_inv_eng(adev); if (r) return r; - /* Check if ecc is available */ - if (!amdgpu_sriov_vf(adev)) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA20: - r = amdgpu_atomfirmware_mem_ecc_supported(adev); - if (!r) { - DRM_INFO("ECC is not present.\n"); - if (adev->df_funcs->enable_ecc_force_par_wr_rmw) - adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); - } else { - DRM_INFO("ECC is active.\n"); - } - r = amdgpu_atomfirmware_sram_ecc_supported(adev); - if (!r) { - DRM_INFO("SRAM ECC is not present.\n"); - } else { - DRM_INFO("SRAM ECC is active.\n"); - } - break; - default: - break; + /* + * Workaround for a performance drop issue when the VBIOS enables + * partial writes while disabling HBM ECC for Vega10.
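+ * When UMC RAS stays disabled, the DF callback below turns + * force-parity-write read-modify-write off to avoid that + * penalty.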
+ */ + if (!amdgpu_sriov_vf(adev) && + (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { + if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { + if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); } } + if (!amdgpu_persistent_edc_harvesting_supported(adev)) { + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_count) + adev->mmhub.ras_funcs->reset_ras_error_count(adev); + + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->reset_ras_error_count) + adev->hdp.ras_funcs->reset_ras_error_count(adev); + } + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; @@ -856,20 +1319,19 @@ static int gmc_v9_0_late_init(void *handle) static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { - u64 base = 0; - - if (adev->asic_type == CHIP_ARCTURUS) - base = mmhub_v9_4_get_fb_location(adev); - else if (!amdgpu_sriov_vf(adev)) - base = mmhub_v1_0_get_fb_location(adev); + u64 base = adev->mmhub.funcs->get_fb_location(adev); /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - amdgpu_gmc_vram_location(adev, mc, base); - amdgpu_gmc_gart_location(adev, mc); - amdgpu_gmc_agp_location(adev, mc); + if (adev->gmc.xgmi.connected_to_cpu) { + amdgpu_gmc_sysvm_location(adev, mc); + } else { + amdgpu_gmc_vram_location(adev, mc, base); + amdgpu_gmc_gart_location(adev, mc); + amdgpu_gmc_agp_location(adev, mc); + } /* base offset of vram pages */ - adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); + adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); /* XXX: add the xgmi offset of the physical node? */ adev->vm_manager.vram_base_offset += @@ -894,7 +1356,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; - if (!(adev->flags & AMD_IS_APU)) { + if (!(adev->flags & AMD_IS_APU) && + !adev->gmc.xgmi.connected_to_cpu) { r = amdgpu_device_resize_fb_bar(adev); if (r) return r; @@ -903,10 +1366,28 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { - adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev); + /* + * An AMD Accelerated Processing Platform (APP) supporting a GPU-host + * XGMI interface can use VRAM through here, as it appears as + * system-reserved memory in the host address space. + * + * For APUs, VRAM is just the stolen system memory and can be accessed + * directly. + * + * Otherwise, use the legacy Host Data Path (HDP) through the PCIe BAR.
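+ * + * For example, physical XGMI node N then ends up with + * aper_base = MC FB offset + N * node_segment_size, and + * aper_size spanning the full real VRAM size.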
+ */ + + /* check whether both host-gpu and gpu-gpu xgmi links exist */ + if ((adev->flags & AMD_IS_APU) || + (adev->gmc.xgmi.supported && + adev->gmc.xgmi.connected_to_cpu)) { + adev->gmc.aper_base = + adev->gfxhub.funcs->get_mc_fb_offset(adev) + + adev->gmc.xgmi.physical_node_id * + adev->gmc.xgmi.node_segment_size; adev->gmc.aper_size = adev->gmc.real_vram_size; } + #endif /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; @@ -915,16 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) /* set the gart size */ if (amdgpu_gart_size == -1) { - switch (adev->asic_type) { - case CHIP_VEGA10: /* all engines support GPUVM */ - case CHIP_VEGA12: /* all engines support GPUVM */ - case CHIP_VEGA20: - case CHIP_ARCTURUS: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): /* all engines support GPUVM */ + case IP_VERSION(9, 2, 1): /* all engines support GPUVM */ + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): default: adev->gmc.gart_size = 512ULL << 20; break; - case CHIP_RAVEN: /* DCE SG support */ - case CHIP_RENOIR: + case IP_VERSION(9, 1, 0): /* DCE SG support */ + case IP_VERSION(9, 2, 2): /* DCE SG support */ + case IP_VERSION(9, 3, 0): adev->gmc.gart_size = 1024ULL << 20; break; } @@ -932,6 +1415,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; } + adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; + gmc_v9_0_vram_gtt_location(adev, &adev->gmc); return 0; @@ -945,6 +1430,15 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) WARN(1, "VEGA10 PCIE GART already initialized\n"); return 0; } + + if (adev->gmc.xgmi.connected_to_cpu) { + adev->gmc.vmid0_page_table_depth = 1; + adev->gmc.vmid0_page_table_block_size = 12; + } else { + adev->gmc.vmid0_page_table_depth = 0; + adev->gmc.vmid0_page_table_block_size = 0; + } + /* Initialize common gart structure */ r = amdgpu_gart_init(adev); if (r) @@ -952,53 +1446,31 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) adev->gart.table_size = adev->gart.num_gpu_pages * 8; adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | AMDGPU_PTE_EXECUTABLE; - return amdgpu_gart_table_vram_alloc(adev); -} -static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) -{ - u32 d1vga_control; - unsigned size; - - /* - * TODO Remove once GART corruption is resolved - * Check related code in gmc_v9_0_sw_fini - * */ - if (gmc_v9_0_keep_stolen_memory(adev)) - return 9 * 1024 * 1024; - - d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ - } else { - u32 viewport; + r = amdgpu_gart_table_vram_alloc(adev); + if (r) + return r; - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: - viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); - size = (REG_GET_FIELD(viewport, - HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * - REG_GET_FIELD(viewport, - HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * - 4); - break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - default: - viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); - size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * - REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * - 4); - break; - } + if (adev->gmc.xgmi.connected_to_cpu) { + r = 
amdgpu_gmc_pdb0_alloc(adev); } - /* return 0 if the pre-OS buffer uses up most of vram */ - if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) - return 0; - return size; + return r; +} + +/** + * gmc_v9_0_save_registers - saves regs + * + * @adev: amdgpu_device pointer + * + * This saves potential register values that should be + * restored upon resume + */ +static void gmc_v9_0_save_registers(struct amdgpu_device *adev) +{ + if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) + adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } static int gmc_v9_0_sw_init(void *handle) @@ -1006,11 +1478,11 @@ static int gmc_v9_0_sw_init(void *handle) int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfxhub_v1_0_init(adev); - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_init(adev); - else - mmhub_v1_0_init(adev); + adev->gfxhub.funcs->init(adev); + + adev->mmhub.funcs->init(adev); + if (adev->mca.funcs) + adev->mca.funcs->init(adev); spin_lock_init(&adev->gmc.invalidate_lock); @@ -1034,14 +1506,15 @@ static int gmc_v9_0_sw_init(void *handle) else chansize = 128; - numchan = adev->df_funcs->get_hbm_channel_number(adev); + numchan = adev->df.funcs->get_hbm_channel_number(adev); adev->gmc.vram_width = numchan * chansize; } adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): adev->num_vmhubs = 2; if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { @@ -1053,10 +1526,11 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.num_level > 1; } break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RENOIR: + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): adev->num_vmhubs = 2; @@ -1071,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle) else amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->num_vmhubs = 3; /* Keep the vm size same with Vega20 */ @@ -1087,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, &adev->gmc.vm_fault); if (r) @@ -1100,11 +1574,14 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - /* interrupt sent to DF. */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, - &adev->gmc.ecc_irq); - if (r) - return r; + if (!amdgpu_sriov_vf(adev) && + !adev->gmc.xgmi.connected_to_cpu) { + /* interrupt sent to DF. 
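+ * The ECC interrupt source is registered against the DF IH + * client below, and only on bare metal without a CPU XGMI + * link, matching the checks above.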
*/ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, + &adev->gmc.ecc_irq); + if (r) + return r; + } /* Set the internal MC address mask * This is the max address of the GPU's @@ -1120,7 +1597,7 @@ static int gmc_v9_0_sw_init(void *handle) adev->need_swiotlb = drm_need_swiotlb(44); if (adev->gmc.xgmi.supported) { - r = gfxhub_v1_1_get_xgmi_info(adev); + r = adev->gfxhub.funcs->get_xgmi_info(adev); if (r) return r; } @@ -1129,7 +1606,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev); + amdgpu_gmc_get_vbios_allocations(adev); /* Memory manager */ r = amdgpu_bo_init(adev); @@ -1143,33 +1620,34 @@ static int gmc_v9_0_sw_init(void *handle) /* * number of VMs * VMID 0 is reserved for System - * amdgpu graphics/compute will use VMIDs 1-7 - * amdkfd will use VMIDs 8-15 + * amdgpu graphics/compute will use VMIDs 1..n-1 + * amdkfd will use VMIDs n..15 + * + * The first KFD VMID is 8 for GPUs with graphics, 3 for + * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs + * for video processing. */ - adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.first_kfd_vmid = + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8; amdgpu_vm_manager_init(adev); + gmc_v9_0_save_registers(adev); + return 0; } static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - void *stolen_vga_buf; amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - - if (gmc_v9_0_keep_stolen_memory(adev)) - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); - amdgpu_gart_table_vram_free(adev); + amdgpu_bo_unref(&adev->gmc.pdb0_bo); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); return 0; } @@ -1177,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle) static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 0, 0): if (amdgpu_sriov_vf(adev)) break; - /* fall through */ - case CHIP_VEGA20: + fallthrough; + case IP_VERSION(9, 4, 0): soc15_program_register_sequence(adev, golden_settings_mmhub_1_0_0, ARRAY_SIZE(golden_settings_mmhub_1_0_0)); @@ -1190,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); break; - case CHIP_VEGA12: - break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 0): /* TODO for renoir */ soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, @@ -1204,6 +1681,23 @@ } /** + * gmc_v9_0_restore_registers - restores regs + * + * @adev: amdgpu_device pointer + * + * This restores register values saved at suspend.
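+ * + * At present this re-programs only mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 + * (captured by gmc_v9_0_save_registers()) on DCE_HWIP versions + * 1.0.0 and 1.0.1, and warns if the readback does not match.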
+ */ +void gmc_v9_0_restore_registers(struct amdgpu_device *adev) +{ + if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { + WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); + WARN_ON(adev->gmc.sdpif_register != + RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0)); + } +} + +/** * gmc_v9_0_gart_enable - gart enable * * @adev: amdgpu_device pointer @@ -1212,28 +1706,34 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { int r; + if (adev->gmc.xgmi.connected_to_cpu) + amdgpu_gmc_init_pdb0(adev); + if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } + r = amdgpu_gart_table_vram_pin(adev); if (r) return r; - r = gfxhub_v1_0_gart_enable(adev); + r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; - if (adev->asic_type == CHIP_ARCTURUS) - r = mmhub_v9_4_gart_enable(adev); - else - r = mmhub_v1_0_gart_enable(adev); + r = adev->mmhub.funcs->gart_enable(adev); if (r) return r; - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); + DRM_INFO("PCIE GART of %uM enabled.\n", + (unsigned)(adev->gmc.gart_size >> 20)); + if (adev->gmc.pdb0_bo) + DRM_INFO("PDB0 located at 0x%016llX\n", + (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); + DRM_INFO("PTB located at 0x%016llX\n", + (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); + adev->gart.ready = true; return 0; } @@ -1243,59 +1743,34 @@ static int gmc_v9_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; int r, i; - u32 tmp; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); if (adev->mode_info.num_crtc) { - if (adev->asic_type != CHIP_ARCTURUS) { - /* Lockout access through VGA aperture*/ - WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - - /* disable VGA render */ - WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - } + /* Lockout access through VGA aperture*/ + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + /* disable VGA render */ + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); } - amdgpu_device_program_register_sequence(adev, - golden_settings_vega10_hdp, - ARRAY_SIZE(golden_settings_vega10_hdp)); + if (adev->mmhub.funcs->update_power_gating) + adev->mmhub.funcs->update_power_gating(adev, true); - switch (adev->asic_type) { - case CHIP_RAVEN: - /* TODO for renoir */ - mmhub_v1_0_update_power_gating(adev, true); - break; - case CHIP_ARCTURUS: - WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); - break; - default: - break; - } - - WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); - - tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); - WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); - - WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); - WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); + adev->hdp.funcs->init_registers(adev); /* After HDP is initialized, flush HDP.*/ - adev->nbio.funcs->hdp_flush(adev, NULL); + adev->hdp.funcs->flush_hdp(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; else value = true; - gfxhub_v1_0_set_fault_enable_default(adev, value); - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_set_fault_enable_default(adev, value); - else - 
mmhub_v1_0_set_fault_enable_default(adev, value); - + if (!amdgpu_sriov_vf(adev)) { + adev->gfxhub.funcs->set_fault_enable_default(adev, value); + adev->mmhub.funcs->set_fault_enable_default(adev, value); + } for (i = 0; i < adev->num_vmhubs; ++i) gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); @@ -1316,11 +1791,8 @@ static int gmc_v9_0_hw_init(void *handle) */ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) { - gfxhub_v1_0_gart_disable(adev); - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_gart_disable(adev); - else - mmhub_v1_0_gart_disable(adev); + adev->gfxhub.funcs->gart_disable(adev); + adev->mmhub.funcs->gart_disable(adev); amdgpu_gart_table_vram_unpin(adev); } @@ -1328,6 +1800,8 @@ static int gmc_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v9_0_gart_disable(adev); + if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); @@ -1336,7 +1810,6 @@ static int gmc_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - gmc_v9_0_gart_disable(adev); return 0; } @@ -1385,10 +1858,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_set_clockgating(adev, state); - else - mmhub_v1_0_set_clockgating(adev, state); + adev->mmhub.funcs->set_clockgating(adev, state); athub_v1_0_set_clockgating(adev, state); @@ -1399,10 +1869,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) - mmhub_v9_4_get_clockgating(adev, flags); - else - mmhub_v1_0_get_clockgating(adev, flags); + adev->mmhub.funcs->get_clockgating(adev, flags); athub_v1_0_get_clockgating(adev, flags); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 971c0840358f..c415c439f690 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -24,24 +24,8 @@ #ifndef __GMC_V9_0_H__ #define __GMC_V9_0_H__ - /* - * The latest engine allocation on gfx9 is: - * Engine 2, 3: firmware - * Engine 0, 1, 4~16: amdgpu ring, - * subject to change when ring number changes - * Engine 17: Gart flushes - */ -#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 -#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 - extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; -/* amdgpu_amdkfd*.c */ -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, - uint32_t vmid, uint64_t value); +void gmc_v9_0_restore_registers(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c new file mode 100644 index 000000000000..eecfb1545c1e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -0,0 +1,166 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "hdp_v4_0.h" +#include "amdgpu_ras.h" + +#include "hdp/hdp_4_0_offset.h" +#include "hdp/hdp_4_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +/* for Vega20 register name change */ +#define mmHDP_MEM_POWER_CTRL 0x00d4 +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L +#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + +static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + else + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +} + +static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0)) + return; + + if (!ring || !ring->funcs->emit_wreg) + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + else + amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( + HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); +} + +static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + + /* HDP SRAM errors are uncorrectable ones (i.e. 
fatal errors) */ + err_data->ue_count += RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +}; + +static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + + if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0)) + WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0); + else + /*read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +} + +static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) { + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; + else + data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; + + if (def != data) + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); + } else { + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) + data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; + else + data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); + + if (def != data) + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); + } +} + +static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_HDP_LS */ + data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); + if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; +} + +static void hdp_v4_0_init_registers(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[HDP_HWIP][0]) { + case IP_VERSION(4, 2, 1): + WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); + break; + default: + break; + } + + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); + + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8)); + WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); +} + +const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs = { + .ras_late_init = amdgpu_hdp_ras_late_init, + .ras_fini = amdgpu_hdp_ras_fini, + .query_ras_error_count = hdp_v4_0_query_ras_error_count, + .reset_ras_error_count = hdp_v4_0_reset_ras_error_count, +}; + +const struct amdgpu_hdp_funcs hdp_v4_0_funcs = { + .flush_hdp = hdp_v4_0_flush_hdp, + .invalidate_hdp = hdp_v4_0_invalidate_hdp, + .update_clock_gating = hdp_v4_0_update_clock_gating, + .get_clock_gating_state = hdp_v4_0_get_clockgating_state, + .init_registers = hdp_v4_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h new file mode 100644 index 000000000000..dc3a1b81dd62 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h @@ -0,0 +1,32 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __HDP_V4_0_H__ +#define __HDP_V4_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs; +extern const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c new file mode 100644 index 000000000000..5793977953cc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -0,0 +1,223 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
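Both HDP generations in this series route their flush the same way: with no ring available (or a ring that cannot emit register writes) the flush is an immediate CPU write to the remapped HDP_MEM_FLUSH_CNTL mirror, otherwise it is queued as a ring packet so it executes in order with the ring's other commands. A minimal caller sketch, assuming the adev->hdp.funcs table this series introduces; the wrapper name is invented for illustration:

	/* Illustrative sketch: dispatch an HDP flush through the new funcs table. */
	static inline void example_hdp_flush(struct amdgpu_device *adev,
					     struct amdgpu_ring *ring)
	{
		/* ring == NULL means: write from the CPU right now; otherwise the
		 * write lands in the command stream and is ordered with ring work. */
		if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
			adev->hdp.funcs->flush_hdp(adev, ring);
	}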
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "hdp_v5_0.h" + +#include "hdp/hdp_5_0_0_offset.h" +#include "hdp/hdp_5_0_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + else + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); +} + +static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) { + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + } else { + amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( + HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); + } +} + +static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t hdp_clk_cntl, hdp_clk_cntl1; + uint32_t hdp_mem_pwr_cntl; + + if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD))) + return; + + hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); + + /* Before doing the clock/power mode switch, + * force on the IPH & RC clocks */ + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + IPH_MEM_CLK_SOFT_OVERRIDE, 1); + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + RC_MEM_CLK_SOFT_OVERRIDE, 1); + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); + + /* HDP 5.0 doesn't support dynamic power mode switch, + * disable clock and power gating before making any change */ + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_SD_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 0); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_SD_EN, 0); + WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + + /* Already disabled above.
The actions below are for "enabled" only */ + if (enable) { + /* only one clock gating mode (LS/DS/SD) can be enabled */ + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_LS_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 1); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_DS_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 1); + } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_SD_EN, 1); + /* RC should not use shut down mode, fallback to ds or ls if allowed */ + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_DS_EN, 1); + else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, + HDP_MEM_POWER_CTRL, + RC_MEM_POWER_LS_EN, 1); + } + + /* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to + * be set for SRAM LS/DS/SD */ + if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD)) { + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + IPH_MEM_POWER_CTRL_EN, 1); + hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, + RC_MEM_POWER_CTRL_EN, 1); + WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); + } + } + + /* disable IPH & RC clock override after clock/power mode changing */ + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + IPH_MEM_CLK_SOFT_OVERRIDE, 0); + hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, + RC_MEM_CLK_SOFT_OVERRIDE, 0); + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); +} + +static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t hdp_clk_cntl; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) + return; + + hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + + if (enable) { + hdp_clk_cntl &= + ~(uint32_t) + (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK); + } else { + hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK; + } + + WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); +} + +static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + hdp_v5_0_update_mem_power_gating(adev, enable); + hdp_v5_0_update_medium_grain_clock_gating(adev, enable); +} + +static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + uint32_t tmp; + + /* AMD_CG_SUPPORT_HDP_MGCG */ + tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); + if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | + 
HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | + HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK))) + *flags |= AMD_CG_SUPPORT_HDP_MGCG; + + /* AMD_CG_SUPPORT_HDP_LS/DS/SD */ + tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); + if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; + else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_DS; + else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK) + *flags |= AMD_CG_SUPPORT_HDP_SD; +} + +static void hdp_v5_0_init_registers(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); + tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; + WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); +} + +const struct amdgpu_hdp_funcs hdp_v5_0_funcs = { + .flush_hdp = hdp_v5_0_flush_hdp, + .invalidate_hdp = hdp_v5_0_invalidate_hdp, + .update_clock_gating = hdp_v5_0_update_clock_gating, + .get_clock_gating_state = hdp_v5_0_get_clockgating_state, + .init_registers = hdp_v5_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h new file mode 100644 index 000000000000..2d5ec2b419f3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __HDP_V5_0_H__ +#define __HDP_V5_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_hdp_funcs hdp_v5_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index a13dd9a51149..ddfe4eaeea05 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -179,6 +179,7 @@ static void iceland_ih_irq_disable(struct amdgpu_device *adev) * iceland_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (VI). Also check for @@ -193,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. 
- */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + /* When a ring buffer overflow happens, start parsing interrupts + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catch up. + */ + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + + +out: return (wptr & ih->ptr_mask); } @@ -213,6 +224,8 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, * iceland_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to decode + * @entry: IV entry to place decoded information into * * Decodes the interrupt vector at the current rptr * position and also advance the position. @@ -245,6 +258,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, * iceland_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr * * Set the IH ring buffer rptr. */ @@ -286,8 +300,7 @@ static int iceland_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c new file mode 100644 index 000000000000..9360204da7fb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -0,0 +1,613 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc.
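The reworked iceland_ih_get_wptr() above no longer trusts the writeback copy of the write pointer alone: it re-reads mmIH_RB_WPTR and only treats the ring as overflowed if the RB_OVERFLOW bit is still set, since the bit may already have been cleared by an earlier pass. The recovery arithmetic itself is unchanged; a sketch of it, assuming byte-based ring pointers, 16-byte IV entries, and a power-of-two ring so that ptr_mask wraps correctly:

	/* Illustrative sketch: first IV entry that is safe to parse after an
	 * overflow. Skipping one 16-byte slot past wptr lands on the oldest
	 * entry that has not been overwritten yet. */
	static u32 example_overflow_resume_rptr(u32 wptr, u32 ptr_mask)
	{
		return (wptr + 16) & ptr_mask; /* wrap within the ring buffer */
	}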
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" +#include "vcn_v1_0.h" +#include "jpeg_v1_0.h" + +#include "vcn/vcn_1_0_offset.h" +#include "vcn/vcn_1_0_sh_mask.h" + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev); +static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring); + +static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) +{ + struct amdgpu_device *adev = ring->adev; + ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[(*ptr)++] = 0; + ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); + } else { + ring->ring[(*ptr)++] = reg_offset; + ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); + } + ring->ring[(*ptr)++] = val; +} + +static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) +{ + struct amdgpu_device *adev = ring->adev; + + uint32_t reg, reg_offset, val, mask, i; + + // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); + reg_offset = (reg << 2); + val = lower_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); + reg_offset = (reg << 2); + val = upper_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 3rd to 5th: issue MEM_READ commands + for (i = 0; i <= 2; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); + ring->ring[ptr++] = 0; + } + + // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x13; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 7th: program mmUVD_JRBC_RB_REF_DATA + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA); + reg_offset = (reg << 2); + val = 0x1; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x1; + mask = 0x1; + + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = 0x01400200; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = val; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[ptr++] = 0; + ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); + } else { + ring->ring[ptr++] = reg_offset; + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); + } + ring->ring[ptr++] = mask; + + //9th to 21st: insert no-op + for (i = 0; i <= 12; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ring->ring[ptr++] = 0; + } + + //22nd: reset mmUVD_JRBC_RB_RPTR + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR); + reg_offset = (reg 
<< 2); + val = 0; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x12; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); +} + +/** + * jpeg_v1_0_decode_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v1_0_decode_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v1_0_decode_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); +} + +/** + * jpeg_v1_0_decode_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80010000); +} + +/** + * jpeg_v1_0_decode_ring_insert_end - insert an end command + * + * @ring: amdgpu_ring pointer + * + * Write an end command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00010000); +} + +/** + * jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command + * + * @ring: amdgpu_ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Write a fence and a trap command to the ring.
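jpeg_v1_0_decode_ring_patch_wreg() and the conditional-read block above pick between two PACKETJ encodings based on where the target register sits: byte offsets inside the windows 0x1f800-0x21fff and 0x1e000-0x1e1ff carry their dword offset in the packet header behind UVD_JRBC_EXTERNAL_REG_BASE, while everything else travels as a payload dword. A condensed sketch of the same branch (helper name invented; the real code also emits the EXTERNAL_REG_BASE header packet first):

	/* Illustrative sketch: choose the PACKETJ form for a register write. */
	static void example_emit_jpeg_wreg(struct amdgpu_ring *ring,
					   uint32_t reg_offset, uint32_t val)
	{
		if ((reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
		    (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff)) {
			amdgpu_ring_write(ring, 0);
			/* dword offset folded into the packet header */
			amdgpu_ring_write(ring,
				PACKETJ(reg_offset >> 2, 0, 0, PACKETJ_TYPE0));
		} else {
			/* byte offset travels as packet payload */
			amdgpu_ring_write(ring, reg_offset);
			amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
		}
		amdgpu_ring_write(ring, val);
	}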
+ */ +static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + struct amdgpu_device *adev = ring->adev; + + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0xffffffff); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + /* emit trap */ + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from + * @ib: indirect buffer to execute + * @flags: unused + * + * Write ring commands to execute the indirect buffer. 
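jpeg_v1_0_decode_ring_emit_fence() above stores @seq through the GPCOM mailbox registers, kicks command 0x8 so the engine writes the value to @addr, polls it back through the conditional-read machinery, and only then emits the TYPE7 trap that raises the interrupt serviced by jpeg_v1_0_process_interrupt(). What the CPU side ultimately checks is just the memory word the engine wrote; schematically (amdgpu_fence_process() is the real consumer, this helper is invented):

	/* Illustrative sketch: wrap-safe check of an emitted 32-bit fence seq. */
	static bool example_fence_signaled(const volatile u32 *fence_cpu_addr,
					   u32 emitted_seq)
	{
		/* signed difference tolerates sequence wrap-around */
		return (int)(*fence_cpu_addr - emitted_seq) >= 0;
	}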
+ */ +static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val, + uint32_t mask) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask); +} + +static void 
jpeg_v1_0_decode_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG decode TRAP\n"); + + switch (entry->src_id) { + case 126: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +/** + * jpeg_v1_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +int jpeg_v1_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v1_0_set_dec_ring_funcs(adev); + jpeg_v1_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v1_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + */ +int jpeg_v1_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, + 0, AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch = + SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v1_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG free up sw allocation + */ +void jpeg_v1_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec); +} + +/** + * jpeg_v1_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * @mode: SPG or DPG mode + * + * Setup and start the JPEG block + */ +void jpeg_v1_0_start(struct amdgpu_device *adev, int mode) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + + if (mode == 0) { + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | + UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 
0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + } + + /* initialize wptr */ + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + /* copy patch commands to the jpeg ring */ + jpeg_v1_0_decode_ring_set_patch_ring(ring, + (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); +} + +static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .nop = PACKET0(0x81ff, 0), + .support_64bit_ptrs = false, + .no_user_fence = true, + .vmhub = AMDGPU_MMHUB_0, + .extra_dw = 64, + .get_rptr = jpeg_v1_0_decode_ring_get_rptr, + .get_wptr = jpeg_v1_0_decode_ring_get_wptr, + .set_wptr = jpeg_v1_0_decode_ring_set_wptr, + .emit_frame_size = + 6 + 6 + /* hdp invalidate / flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */ + 26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */ + .emit_ib = jpeg_v1_0_decode_ring_emit_ib, + .emit_fence = jpeg_v1_0_decode_ring_emit_fence, + .emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v1_0_decode_ring_nop, + .insert_start = jpeg_v1_0_decode_ring_insert_start, + .insert_end = jpeg_v1_0_decode_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = jpeg_v1_0_ring_begin_use, + .end_use = vcn_v1_0_ring_end_use, + .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg, + .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = { + .set = jpeg_v1_0_set_interrupt_state, + .process = jpeg_v1_0_process_interrupt, +}; + +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs; +} + +static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); + int cnt = 0; + + mutex_lock(&adev->vcn.vcn1_jpeg1_workaround); + + if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec)) + DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n"); + + for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) { + if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt])) + DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt); + } + + vcn_v1_0_set_pg_for_begin_use(ring, set_clocks); +} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h new file mode 100644 index 000000000000..bbf33a6a3972 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __JPEG_V1_0_H__ +#define __JPEG_V1_0_H__ + +int jpeg_v1_0_early_init(void *handle); +int jpeg_v1_0_sw_init(void *handle); +void jpeg_v1_0_sw_fini(void *handle); +void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); + +#endif /*__JPEG_V1_0_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c new file mode 100644 index 000000000000..299de1d131d8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -0,0 +1,812 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
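One v1.0-specific detail worth flagging before the v2.0 code: jpeg_v1_0_ring_begin_use() (end of jpeg_v1_0.c above) reflects that JPEG 1.0 shares clock and power state with VCN 1.0, so the driver cancels the VCN idle worker, takes the vcn1_jpeg1_workaround mutex, and drains every VCN ring before letting JPEG work in. The drain step, schematically (the array-based helper is invented for illustration):

	/* Illustrative sketch: wait until each ring in a set has retired all
	 * of its emitted fences, warning if any of them does not drain. */
	static void example_drain_rings(struct amdgpu_ring **rings, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			if (amdgpu_fence_wait_empty(rings[i]))
				DRM_ERROR("ring %d may not be empty\n", i);
	}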
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v2_0.h" + +#include "vcn/vcn_2_0_0_offset.h" +#include "vcn/vcn_2_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v2_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v2_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v2_0_set_dec_ring_funcs(adev); + jpeg_v2_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v2_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v2_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, + 0, AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v2_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v2_0_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v2_0_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v2_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + r = amdgpu_ring_test_helper(ring); + if (!r) + DRM_INFO("JPEG decode initialized successfully.\n"); + + return r; +} + +/** + * jpeg_v2_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v2_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) + jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + return 0; +} + +/** + * jpeg_v2_0_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v2_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = 
jpeg_v2_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v2_0_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v2_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v2_0_hw_init(adev); + + return r; +} + +static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev) +{ + uint32_t data; + int r = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + r = SOC15_WAIT_ON_RREG(JPEG, 0, + mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + if (r) { + DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); + return r; + } + } + + /* Removing the anti-hang mechanism to indicate the UVDJ tile is ON */ + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + return 0; +} + +static int jpeg_v2_0_enable_power_gating(struct amdgpu_device *adev) +{ + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + uint32_t data; + int r = 0; + + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)); + data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; + data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, + (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + if (r) { + DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); + return r; + } + } + + return 0; +} + +static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JPEG_ENC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +/** + * jpeg_v2_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and
start the JPEG block + */ +static int jpeg_v2_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + + /* disable power gating */ + r = jpeg_v2_0_disable_power_gating(adev); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v2_0_disable_clock_gating(adev); + + WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v2_0_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_0_stop(struct amdgpu_device *adev) +{ + int r; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable JPEG CGC */ + jpeg_v2_0_enable_clock_gating(adev); + + /* enable power gating */ + r = jpeg_v2_0_enable_power_gating(adev); + if (r) + return r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, false); + + return 0; +} + +/** + * jpeg_v2_0_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v2_0_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v2_0_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +/** + * jpeg_v2_0_dec_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. 
+ */ +void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80010000); +} + +/** + * jpeg_v2_0_dec_ring_insert_end - insert an end command + * + * @ring: amdgpu_ring pointer + * + * Write an end command to the ring. + */ +void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00010000); +} + +/** + * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command + * + * @ring: amdgpu_ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Write a fence and a trap command to the ring. + */ +void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from + * @ib: indirect buffer to execute + * @flags: unused + * + * Write ring commands to execute the indirect buffer.
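jpeg_v2_0_dec_ring_emit_reg_wait() just below encodes a hardware-side poll: after 0x01400200 is loaded into what appears to be the retry/timeout control (UVD_JRBC_RB_COND_RD_TIMER), the engine repeatedly reads the target register and compares the masked value against UVD_JRBC_RB_REF_DATA until it matches. The CPU-side equivalent of what that packet sequence requests, as a self-contained sketch:

	#include <errno.h>
	#include <stdint.h>

	/* Illustrative sketch: the poll the PACKETJ_TYPE3 sequence asks the
	 * engine to perform; read_reg stands in for the register accessor. */
	static int example_reg_wait(uint32_t (*read_reg)(uint32_t reg),
				    uint32_t reg, uint32_t val, uint32_t mask,
				    unsigned int retries)
	{
		while (retries--)
			if ((read_reg(reg) & mask) == val)
				return 0;	/* matched */
		return -ETIMEDOUT;		/* retry budget exhausted */
	}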
+ */ +void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET, + 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); +} + +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + 
amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static bool jpeg_v2_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v2_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + + return ret; +} + +static int jpeg_v2_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE); + + if (enable) { + if (!jpeg_v2_0_is_idle(handle)) + return -EBUSY; + jpeg_v2_0_enable_clock_gating(adev); + } else { + jpeg_v2_0_disable_clock_gating(adev); + } + + return 0; +} + +static int jpeg_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v2_0_stop(adev); + else + ret = jpeg_v2_0_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { + .name = "jpeg_v2_0", + .early_init = jpeg_v2_0_early_init, + .late_init = NULL, + .sw_init = jpeg_v2_0_sw_init, + .sw_fini = jpeg_v2_0_sw_fini, + .hw_init = jpeg_v2_0_hw_init, + .hw_fini = jpeg_v2_0_hw_fini, + .suspend = jpeg_v2_0_suspend, + .resume = jpeg_v2_0_resume, + .is_idle = jpeg_v2_0_is_idle, + .wait_for_idle = jpeg_v2_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v2_0_set_clockgating_state, + .set_powergating_state = jpeg_v2_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = jpeg_v2_0_dec_ring_get_rptr, + .get_wptr = jpeg_v2_0_dec_ring_get_wptr, + .set_wptr = jpeg_v2_0_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */ + 8 
+ 16, + .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = { + .set = jpeg_v2_0_set_interrupt_state, + .process = jpeg_v2_0_process_interrupt, +}; + +static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs; +} + +const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &jpeg_v2_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h new file mode 100644 index 000000000000..1a03baa59755 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V2_0_H__ +#define __JPEG_V2_0_H__ + +#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff +#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 +#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a +#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb +#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf +#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 +#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed +#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 +#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 +#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + +void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags); +void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr); +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); + +extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; + +#endif /* __JPEG_V2_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c new file mode 100644 index 000000000000..a29c86617fb5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -0,0 +1,732 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
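
The *_INTERNAL_OFFSET table above lists the registers the JRBC engine can reach directly from a TYPE0 packet; anything else is written through the JRBC_DEC_EXTERNAL_REG_WRITE_ADDR window, which is the branch visible at the top of jpeg_v2_0_dec_ring_emit_wreg(). A condensed sketch of the two paths, where ring_write() and type0() are hypothetical stand-ins for amdgpu_ring_write() and the PACKETJ TYPE0 header:

#include <stdint.h>

#define EXT_REG_WRITE_ADDR 0x18000 /* JRBC_DEC_EXTERNAL_REG_WRITE_ADDR */

static void ring_write(uint32_t dw) { (void)dw; /* stand-in stub */ }
static uint32_t type0(uint32_t reg) { return reg & 0x3ffff; }

static void emit_wreg_tail(uint32_t reg_offset, uint32_t val, int direct)
{
	if (direct) {
		/* register index fits the packet's 18-bit field */
		ring_write(type0(reg_offset >> 2));
	} else {
		/* stage the byte offset, then push it through the
		 * external-register write window */
		ring_write(reg_offset);
		ring_write(type0(EXT_REG_WRITE_ADDR));
	}
	ring_write(val); /* payload dword, written either way */
}
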
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v2_0.h" + +#include "vcn/vcn_2_5_offset.h" +#include "vcn/vcn_2_5_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2 + +static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state); + +static int amdgpu_ih_clientid_jpeg[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + +/** + * jpeg_v2_5_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v2_5_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 harvest; + int i; + + adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->jpeg.harvest_config |= 1 << i; + } + if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 | + AMDGPU_JPEG_HARVEST_JPEG1)) + return -ENOENT; + + jpeg_v2_5_set_dec_ring_funcs(adev); + jpeg_v2_5_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v2_5_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v2_5_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int i, r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq); + if (r) + return r; + } + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; + sprintf(ring->name, "jpeg_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, + 0, AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); + } + + return 0; +} + +/** + * jpeg_v2_5_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v2_5_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v2_5_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v2_5_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i, r; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + 
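
Two small calculations drive the per-instance loops that follow: a harvest bitmask built from CC_UVD_HARVESTING, and a doorbell slot derived from the shared VCN doorbell range. A sketch of both with made-up values (two instances, doorbell base 0x18); the fuse constant is a stand-in:

#include <stdint.h>
#include <stdio.h>

#define UVD_DISABLE 0x2 /* stand-in for CC_UVD_HARVESTING__UVD_DISABLE_MASK */

int main(void)
{
	uint32_t fuses[2] = { 0, UVD_DISABLE }; /* instance 1 fused off */
	uint32_t harvest_config = 0;
	uint32_t vcn_ring0_1 = 0x18; /* hypothetical doorbell base */
	int i;

	for (i = 0; i < 2; i++)
		if (fuses[i] & UVD_DISABLE)
			harvest_config |= 1u << i;

	for (i = 0; i < 2; i++) {
		if (harvest_config & (1u << i))
			continue; /* sw_init skips fused-off instances */
		/* same formula as jpeg_v2_5_sw_init() uses */
		printf("jpeg_dec_%d doorbell %u\n", i,
		       (unsigned int)((vcn_ring0_1 << 1) + 1 + 8 * i));
	}
	return 0;
}

If both harvest bits end up set, early_init returns -ENOENT and the block is never brought up.
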
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + + DRM_INFO("JPEG decode initialized successfully.\n"); + + return 0; +} + +/** + * jpeg_v2_5_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v2_5_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) + jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + } + + return 0; +} + +/** + * jpeg_v2_5_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v2_5_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v2_5_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v2_5_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v2_5_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v2_5_hw_init(adev); + + return r; +} + +static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL); + data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK + | JPEG_CGC_CTRL__JMCIF_MODE_MASK + | JPEG_CGC_CTRL__JRBBM_MODE_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data); +} + +static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data); +} + +/** + * jpeg_v2_5_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v2_5_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* JPEG disable CGC */ + jpeg_v2_5_disable_clock_gating(adev, i); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, i,
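
jpeg_v2_5_disable_clock_gating() is a classic read-modify-write: pick dynamic or forced-on clocks from cg_flags, program the gate-delay and off-delay timers, then clear the per-engine gate and mode bits. The shape of the CGC_CTRL update, with illustrative field positions (only bit 0 matches the real DYN_CLOCK_MODE field; the timer shifts are assumptions):

#include <stdint.h>

#define DYN_CLOCK_MODE_MASK  0x1u /* bit 0, as in the real header */
#define DYN_CLOCK_MODE_SHIFT 0
#define GATE_DLY_TIMER_SHIFT 2    /* illustrative position */
#define CLK_OFF_DELAY_SHIFT  4    /* illustrative position */

static uint32_t cgc_ctrl_for_cg_disable(uint32_t data, int mgcg)
{
	if (mgcg)
		data |= 1u << DYN_CLOCK_MODE_SHIFT; /* dynamic gating */
	else
		data &= ~DYN_CLOCK_MODE_MASK;       /* clocks always on */

	data |= 1u << GATE_DLY_TIMER_SHIFT;
	data |= 4u << CLK_OFF_DELAY_SHIFT;
	return data;
}

Note the clear must use the field's _MASK rather than its __SHIFT: with a shift of 0, the complemented shift value would clear nothing.
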
mmJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR); + } + + return 0; +} + +/** + * jpeg_v2_5_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_5_stop(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v2_5_enable_clock_gating(adev, i); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + } + + return 0; +} + +/** + * jpeg_v2_5_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v2_5_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v2_5_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +/** + * jpeg_v2_6_dec_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. 
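
The ring programming above points the JRBC at the ring buffer by splitting its 64-bit GPU address across the 64BIT_BAR_LOW/HIGH pair and programming the size in dwords (ring_size / 4). A self-contained sketch of the split; wreg() and the register indices are hypothetical:

#include <assert.h>
#include <stdint.h>

enum { BAR_LOW, BAR_HIGH, RB_SIZE, RB_WPTR, NREGS };
static uint32_t regs[NREGS]; /* stand-in register file */
static void wreg(int r, uint32_t v) { regs[r] = v; }

static void program_rb(uint64_t gpu_addr, uint32_t ring_size_bytes)
{
	wreg(BAR_LOW, (uint32_t)(gpu_addr & 0xffffffffu)); /* lower_32_bits() */
	wreg(BAR_HIGH, (uint32_t)(gpu_addr >> 32));        /* upper_32_bits() */
	wreg(RB_WPTR, 0);
	wreg(RB_SIZE, ring_size_bytes / 4); /* size in dwords */
}

int main(void)
{
	program_rb(0x1234567880001000ull, 512 * 4);
	assert(regs[BAR_LOW] == 0x80001000u && regs[BAR_HIGH] == 0x12345678u);
	return 0;
}
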
+ */ +static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14))); +} + +/** + * jpeg_v2_6_dec_ring_insert_end - insert an end command + * + * @ring: amdgpu_ring pointer + * + * Write an end command to the ring. + */ +static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14))); +} + +static bool jpeg_v2_5_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + } + + return ret; +} + +static int jpeg_v2_5_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ret = SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + if (ret) + return ret; + } + + return 0; +} + +static int jpeg_v2_5_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE); + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (enable) { + if (!jpeg_v2_5_is_idle(handle)) + return -EBUSY; + jpeg_v2_5_enable_clock_gating(adev, i); + } else { + jpeg_v2_5_disable_clock_gating(adev, i); + } + } + + return 0; +} + +static int jpeg_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v2_5_stop(adev); + else + ret = jpeg_v2_5_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static
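
The v2_6 start/end markers above differ from the v2_0 ones only in the payload: each instance flips its own flag bit, derived from ring->me. Worked out for both instances as a standalone sketch (nothing here beyond the expression in the code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int me = 0; me < 2; me++) {
		uint32_t start = 0x80000000u | (1u << (me * 2 + 14));
		uint32_t end = 1u << (me * 2 + 14);

		/* me=0 uses bit 14, me=1 uses bit 16 */
		printf("me=%d start=0x%08x end=0x%08x\n", me,
		       (unsigned int)start, (unsigned int)end);
	}
	return 0;
}
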
const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { + .name = "jpeg_v2_5", + .early_init = jpeg_v2_5_early_init, + .late_init = NULL, + .sw_init = jpeg_v2_5_sw_init, + .sw_fini = jpeg_v2_5_sw_fini, + .hw_init = jpeg_v2_5_hw_init, + .hw_fini = jpeg_v2_5_hw_fini, + .suspend = jpeg_v2_5_suspend, + .resume = jpeg_v2_5_resume, + .is_idle = jpeg_v2_5_is_idle, + .wait_for_idle = jpeg_v2_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v2_5_set_clockgating_state, + .set_powergating_state = jpeg_v2_5_set_powergating_state, +}; + +static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = { + .name = "jpeg_v2_6", + .early_init = jpeg_v2_5_early_init, + .late_init = NULL, + .sw_init = jpeg_v2_5_sw_init, + .sw_fini = jpeg_v2_5_sw_fini, + .hw_init = jpeg_v2_5_hw_init, + .hw_fini = jpeg_v2_5_hw_fini, + .suspend = jpeg_v2_5_suspend, + .resume = jpeg_v2_5_resume, + .is_idle = jpeg_v2_5_is_idle, + .wait_for_idle = jpeg_v2_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v2_5_set_clockgating_state, + .set_powergating_state = jpeg_v2_5_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = jpeg_v2_5_dec_ring_get_rptr, + .get_wptr = jpeg_v2_5_dec_ring_get_wptr, + .set_wptr = jpeg_v2_5_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = jpeg_v2_5_dec_ring_get_rptr, + .get_wptr = jpeg_v2_5_dec_ring_get_wptr, + .set_wptr = jpeg_v2_5_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_6_dec_ring_insert_start, + .insert_end = jpeg_v2_6_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = 
jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + if (adev->asic_type == CHIP_ARCTURUS) + adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs; + else /* CHIP_ALDEBARAN */ + adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec.me = i; + DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i); + } +} + +static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { + .set = jpeg_v2_5_set_interrupt_state, + .process = jpeg_v2_5_process_interrupt, +}; + +static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].irq.num_types = 1; + adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + } +} + +const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 5, + .rev = 0, + .funcs = &jpeg_v2_5_ip_funcs, +}; + +const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 6, + .rev = 0, + .funcs = &jpeg_v2_6_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h new file mode 100644 index 000000000000..3b0aa29b9879 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __JPEG_V2_5_H__ +#define __JPEG_V2_5_H__ + +extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block; +extern const struct amdgpu_ip_block_version jpeg_v2_6_ip_block; + +#endif /* __JPEG_V2_5_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c new file mode 100644 index 000000000000..01c242c5abc3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -0,0 +1,608 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "jpeg_v2_0.h" + +#include "vcn/vcn_3_0_0_offset.h" +#include "vcn/vcn_3_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v3_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v3_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->asic_type != CHIP_YELLOW_CARP) { + u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); + + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + return -ENOENT; + } + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v3_0_set_dec_ring_funcs(adev); + jpeg_v3_0_set_irq_funcs(adev); + + return 0; +} + +/** + * jpeg_v3_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v3_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v3_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v3_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + 
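
jpeg_v3_0_early_init() turns the harvest fuse into a hard -ENOENT (with Yellow Carp exempted from the check), so device init skips the JPEG block entirely rather than bringing up a fused-off instance. The early-out in isolation; the fuse constant and helper name are stand-ins:

#include <errno.h>
#include <stdbool.h>

#define UVD_DISABLE 0x2 /* stand-in fuse bit */

static int jpeg_early_init_sketch(bool has_harvest_fuse, unsigned int fuse)
{
	if (has_harvest_fuse && (fuse & UVD_DISABLE))
		return -ENOENT; /* fused off: skip the whole IP block */
	return 0; /* single instance, proceed to ring/irq setup */
}
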
r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v3_0_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v3_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + + DRM_INFO("JPEG decode initialized successfully.\n"); + + return 0; +} + +/** + * jpeg_v3_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v3_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) + jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + return 0; +} + +/** + * jpeg_v3_0_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v3_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v3_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v3_0_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v3_0_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v3_0_hw_init(adev); + + return r; +} + +static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JPEG_ENC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK + | JPEG_CGC_CTRL__JMCIF_MODE_MASK + | JPEG_CGC_CTRL__JRBBM_MODE_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); +} + +static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev) +{ + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + uint32_t data = 0; + int r = 0; + + data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + r = SOC15_WAIT_ON_RREG(JPEG, 0, + mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, +
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + if (r) { + DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); + return r; + } + } + + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* keep the JPEG in static PG mode */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK); + + return 0; +} + +static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev) +{ + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + uint32_t data = 0; + int r = 0; + + data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, + (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); + + if (r) { + DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); + return r; + } + } + + return 0; +} + +/** + * jpeg_v3_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v3_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + + /* disable power gating */ + r = jpeg_v3_0_disable_static_power_gating(adev); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v3_0_disable_clock_gating(adev); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v3_0_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v3_0_stop(struct amdgpu_device *adev) +{ + int r; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v3_0_enable_clock_gating(adev); + + /* enable power gating */ + r = jpeg_v3_0_enable_static_power_gating(adev); + if (r) + return r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, false); + + return 0; +} + +/** + * jpeg_v3_0_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct 
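
Static power gating above is a request/acknowledge handshake: write the desired UVDJ power state into UVD_PGFSM_CONFIG (1 requests power-up, 2 power-down in this code), then poll UVD_PGFSM_STATUS until the matching status value appears, failing the transition if it never does. A sketch with an immediately-acking stub in place of the hardware:

#include <errno.h>
#include <stdint.h>

static uint32_t pgfsm_status; /* stand-in for UVD_PGFSM_STATUS */

static void pgfsm_request(uint32_t state)
{
	pgfsm_status = state; /* stub hardware acks at once */
}

static int pgfsm_wait(uint32_t expect, uint32_t mask)
{
	for (int tries = 0; tries < 10; tries++)
		if ((pgfsm_status & mask) == expect)
			return 0;
	return -ETIMEDOUT; /* caller aborts start/stop on this */
}

static int jpeg_set_power(int on)
{
	uint32_t req = on ? 1u : 2u; /* UVDJ_PWR_CONFIG encoding here */

	pgfsm_request(req);
	return pgfsm_wait(req, 0x3u);
}
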
amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v3_0_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v3_0_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static bool jpeg_v3_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 1; + + ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + + return ret; +} + +static int jpeg_v3_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v3_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE); + + if (enable) { + if (!jpeg_v3_0_is_idle(handle)) + return -EBUSY; + jpeg_v3_0_enable_clock_gating(adev); + } else { + jpeg_v3_0_disable_clock_gating(adev); + } + + return 0; +} + +static int jpeg_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (state == adev->jpeg.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = jpeg_v3_0_stop(adev); + else + ret = jpeg_v3_0_start(adev); + + if (!ret) + adev->jpeg.cur_state = state; + + return ret; +} + +static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: JPEG TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->jpeg.inst->ring_dec); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { + .name = "jpeg_v3_0", + .early_init = jpeg_v3_0_early_init, + .late_init = NULL, + .sw_init = jpeg_v3_0_sw_init, + .sw_fini = jpeg_v3_0_sw_fini, + .hw_init = jpeg_v3_0_hw_init, + .hw_fini = jpeg_v3_0_hw_fini, + .suspend = jpeg_v3_0_suspend, + .resume = jpeg_v3_0_resume, + .is_idle = jpeg_v3_0_is_idle, + .wait_for_idle = jpeg_v3_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = jpeg_v3_0_set_clockgating_state, + .set_powergating_state =
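
One contract worth calling out in set_clockgating_state(): gating is refused with -EBUSY unless JRBC reports RB_JOB_DONE, so only an idle engine may be gated, while ungating is always allowed. Reduced to its skeleton (types and callbacks here are illustrative):

#include <errno.h>
#include <stdbool.h>

enum cg_state { CG_UNGATE, CG_GATE };

static int set_clockgating(enum cg_state state, bool job_done,
			   void (*enable_cg)(void), void (*disable_cg)(void))
{
	if (state == CG_GATE) {
		if (!job_done)
			return -EBUSY; /* work still in flight */
		enable_cg();
	} else {
		disable_cg();
	}
	return 0;
}
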
jpeg_v3_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = jpeg_v3_0_dec_ring_get_rptr, + .get_wptr = jpeg_v3_0_dec_ring_get_wptr, + .set_wptr = jpeg_v3_0_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */ + 18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = { + .set = jpeg_v3_0_set_interrupt_state, + .process = jpeg_v3_0_process_interrupt, +}; + +static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.num_types = 1; + adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs; +} + +const struct amdgpu_ip_block_version jpeg_v3_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &jpeg_v3_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.h new file mode 100644 index 000000000000..ce775a0c742f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V3_0_H__ +#define __JPEG_V3_0_H__ + +extern const struct amdgpu_ip_block_version jpeg_v3_0_ip_block; + +#endif /* __JPEG_V3_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c deleted file mode 100644 index 4b3faaccecb9..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ /dev/null @@ -1,3382 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "cikd.h" -#include "atom.h" -#include "amdgpu_atombios.h" -#include "amdgpu_dpm.h" -#include "kv_dpm.h" -#include "gfx_v7_0.h" -#include <linux/seq_file.h> - -#include "smu/smu_7_0_0_d.h" -#include "smu/smu_7_0_0_sh_mask.h" - -#include "gca/gfx_7_2_d.h" -#include "gca/gfx_7_2_sh_mask.h" - -#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define KV_MINIMUM_ENGINE_CLOCK 800 -#define SMC_RAM_END 0x40000 - -static const struct amd_pm_funcs kv_dpm_funcs; - -static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); -static int kv_enable_nb_dpm(struct amdgpu_device *adev, - bool enable); -static void kv_init_graphics_levels(struct amdgpu_device *adev); -static int kv_calculate_ds_divider(struct amdgpu_device *adev); -static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); -static int kv_calculate_dpm_settings(struct amdgpu_device *adev); -static void kv_enable_new_levels(struct amdgpu_device *adev); -static void kv_program_nbps_index_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps); -static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); -static int kv_set_enabled_levels(struct amdgpu_device *adev); -static int kv_force_dpm_highest(struct amdgpu_device *adev); -static int kv_force_dpm_lowest(struct amdgpu_device *adev); -static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps); -static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, - int min_temp, int max_temp); -static int kv_init_fps_limits(struct amdgpu_device *adev); - -static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); -static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); - - -static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - u32 vid_2bit) -{ - struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = - 
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 i; - - if (vddc_sclk_table && vddc_sclk_table->count) { - if (vid_2bit < vddc_sclk_table->count) - return vddc_sclk_table->entries[vid_2bit].v; - else - return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; - } else { - for (i = 0; i < vid_mapping_table->num_entries; i++) { - if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) - return vid_mapping_table->entries[i].vid_7bit; - } - return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; - } -} - -static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - u32 vid_7bit) -{ - struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 i; - - if (vddc_sclk_table && vddc_sclk_table->count) { - for (i = 0; i < vddc_sclk_table->count; i++) { - if (vddc_sclk_table->entries[i].v == vid_7bit) - return i; - } - return vddc_sclk_table->count - 1; - } else { - for (i = 0; i < vid_mapping_table->num_entries; i++) { - if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) - return vid_mapping_table->entries[i].vid_2bit; - } - - return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; - } -} - -static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) -{ -/* This bit selects who handles display phy powergating. - * Clear the bit to let atom handle it. - * Set it to let the driver handle it. - * For now we just let atom handle it. - */ -#if 0 - u32 v = RREG32(mmDOUT_SCRATCH3); - - if (enable) - v |= 0x4; - else - v &= 0xFFFFFFFB; - - WREG32(mmDOUT_SCRATCH3, v); -#endif -} - -static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, - struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, - ATOM_AVAILABLE_SCLK_LIST *table) -{ - u32 i; - u32 n = 0; - u32 prev_sclk = 0; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { - if (table[i].ulSupportedSCLK > prev_sclk) { - sclk_voltage_mapping_table->entries[n].sclk_frequency = - table[i].ulSupportedSCLK; - sclk_voltage_mapping_table->entries[n].vid_2bit = - table[i].usVoltageIndex; - prev_sclk = table[i].ulSupportedSCLK; - n++; - } - } - - sclk_voltage_mapping_table->num_max_dpm_entries = n; -} - -static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - ATOM_AVAILABLE_SCLK_LIST *table) -{ - u32 i, j; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { - if (table[i].ulSupportedSCLK != 0) { - vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = - table[i].usVoltageID; - vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = - table[i].usVoltageIndex; - } - } - - for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { - if (vid_mapping_table->entries[i].vid_7bit == 0) { - for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { - if (vid_mapping_table->entries[j].vid_7bit != 0) { - vid_mapping_table->entries[i] = - vid_mapping_table->entries[j]; - vid_mapping_table->entries[j].vid_7bit = 0; - break; - } - } - - if (j == SUMO_MAX_NUMBER_VOLTAGES) - break; - } - } - - vid_mapping_table->num_entries = i; -} - -#if 0 -static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 1, 4, 1 }, - { 2, 5, 1 }, - { 3, 4, 2 }, - { 4, 1, 1 }, - { 5, 5, 2 }, - { 6, 6, 1 }, - { 7, 9, 2 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const 
struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 1, 4, 1 }, - { 2, 5, 1 }, - { 3, 4, 1 }, - { 4, 1, 1 }, - { 5, 5, 1 }, - { 6, 6, 1 }, - { 7, 9, 1 }, - { 8, 4, 1 }, - { 9, 2, 1 }, - { 10, 3, 1 }, - { 11, 6, 1 }, - { 12, 8, 2 }, - { 13, 1, 1 }, - { 14, 2, 1 }, - { 15, 3, 1 }, - { 16, 1, 1 }, - { 17, 4, 1 }, - { 18, 3, 1 }, - { 19, 1, 1 }, - { 20, 8, 1 }, - { 21, 5, 1 }, - { 22, 1, 1 }, - { 23, 1, 1 }, - { 24, 4, 1 }, - { 27, 6, 1 }, - { 28, 1, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_reg sx0_cac_config_reg[] = -{ - { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc0_cac_config_reg[] = -{ - { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc1_cac_config_reg[] = -{ - { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc2_cac_config_reg[] = -{ - { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc3_cac_config_reg[] = -{ - { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg cpl_cac_config_reg[] = -{ - { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; -#endif - -static const struct kv_pt_config_reg didt_config_kv[] = -{ - { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x03ff0000, 16, 
0x80, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0xFFFFFFFF } -}; - -static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) -{ - struct kv_ps *ps = rps->ps_priv; - - return ps; -} - -static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -#if 0 -static void kv_program_local_cac_table(struct amdgpu_device *adev, - const struct kv_lcac_config_values *local_cac_table, - const struct kv_lcac_config_reg *local_cac_reg) -{ - u32 i, count, data; - const struct kv_lcac_config_values *values = local_cac_table; - - while (values->block_id != 0xffffffff) { - count = values->signal_id; - for (i = 0; i < count; i++) { - data = ((values->block_id << local_cac_reg->block_shift) & - local_cac_reg->block_mask); - data |= ((i << local_cac_reg->signal_shift) & - local_cac_reg->signal_mask); - data |= ((values->t << local_cac_reg->t_shift) & - local_cac_reg->t_mask); - data |= ((1 << local_cac_reg->enable_shift) & - local_cac_reg->enable_mask); - WREG32_SMC(local_cac_reg->cntl, data); - } - values++; - } -} -#endif - -static int kv_program_pt_config_registers(struct amdgpu_device *adev, - const struct kv_pt_config_reg *cac_config_regs) -{ - const struct kv_pt_config_reg *config_regs = cac_config_regs; - u32 data; - u32 cache = 0; - - if 
(config_regs == NULL) - return -EINVAL; - - while (config_regs->offset != 0xFFFFFFFF) { - if (config_regs->type == KV_CONFIGREG_CACHE) { - cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); - } else { - switch (config_regs->type) { - case KV_CONFIGREG_SMC_IND: - data = RREG32_SMC(config_regs->offset); - break; - case KV_CONFIGREG_DIDT_IND: - data = RREG32_DIDT(config_regs->offset); - break; - default: - data = RREG32(config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - data |= cache; - cache = 0; - - switch (config_regs->type) { - case KV_CONFIGREG_SMC_IND: - WREG32_SMC(config_regs->offset, data); - break; - case KV_CONFIGREG_DIDT_IND: - WREG32_DIDT(config_regs->offset, data); - break; - default: - WREG32(config_regs->offset, data); - break; - } - } - config_regs++; - } - - return 0; -} - -static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 data; - - if (pi->caps_sq_ramping) { - data = RREG32_DIDT(ixDIDT_SQ_CTRL0); - if (enable) - data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_SQ_CTRL0, data); - } - - if (pi->caps_db_ramping) { - data = RREG32_DIDT(ixDIDT_DB_CTRL0); - if (enable) - data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_DB_CTRL0, data); - } - - if (pi->caps_td_ramping) { - data = RREG32_DIDT(ixDIDT_TD_CTRL0); - if (enable) - data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TD_CTRL0, data); - } - - if (pi->caps_tcp_ramping) { - data = RREG32_DIDT(ixDIDT_TCP_CTRL0); - if (enable) - data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TCP_CTRL0, data); - } -} - -static int kv_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - if (pi->caps_sq_ramping || - pi->caps_db_ramping || - pi->caps_td_ramping || - pi->caps_tcp_ramping) { - amdgpu_gfx_rlc_enter_safe_mode(adev); - - if (enable) { - ret = kv_program_pt_config_registers(adev, didt_config_kv); - if (ret) { - amdgpu_gfx_rlc_exit_safe_mode(adev); - return ret; - } - } - - kv_do_enable_didt(adev, enable); - - amdgpu_gfx_rlc_exit_safe_mode(adev); - } - - return 0; -} - -#if 0 -static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->caps_cac) { - WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); - WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); - kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); - - WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); - - WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); - - WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); - - WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); - - WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); - WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); - kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 
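
For reference, the removed kv_program_pt_config_registers() above is a sentinel-terminated, table-driven masked read-modify-write loop. A minimal standalone sketch of that pattern follows; regs[], struct cfg_reg and program_regs() are illustrative stand-ins, and the sketch omits the driver's KV_CONFIGREG_CACHE accumulation and the SMC/DIDT indirect accessors:

#include <stdint.h>
#include <stdio.h>

struct cfg_reg {
	uint32_t offset;	/* register index; 0xFFFFFFFF terminates the table */
	uint32_t mask;		/* bits this entry owns */
	uint32_t shift;		/* position of the field inside the mask */
	uint32_t value;
};

static uint32_t regs[0x100];	/* stands in for MMIO space */

static void program_regs(const struct cfg_reg *c)
{
	for (; c->offset != 0xFFFFFFFF; c++) {
		uint32_t data = regs[c->offset];

		data &= ~c->mask;			   /* clear the owned field */
		data |= (c->value << c->shift) & c->mask;  /* write the new value */
		regs[c->offset] = data;
	}
}

int main(void)
{
	static const struct cfg_reg table[] = {
		{ 0x10, 0x000000ff, 0, 0x12 },
		{ 0x10, 0x0000ff00, 8, 0x34 },
		{ 0xFFFFFFFF, 0, 0, 0 }
	};

	regs[0x10] = 0xdeadbeef;
	program_regs(table);
	printf("0x%08x\n", regs[0x10]);	/* 0xdead3412: untouched bits survive */
	return 0;
}
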
- } -} -#endif - -static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (pi->caps_cac) { - if (enable) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); - if (ret) - pi->cac_enabled = false; - else - pi->cac_enabled = true; - } else if (pi->cac_enabled) { - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); - pi->cac_enabled = false; - } - } - - return ret; -} - -static int kv_process_firmware_header(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, DpmTable), - &tmp, pi->sram_end); - - if (ret == 0) - pi->dpm_table_start = tmp; - - ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, SoftRegisters), - &tmp, pi->sram_end); - - if (ret == 0) - pi->soft_regs_start = tmp; - - return ret; -} - -static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->graphics_voltage_change_enable = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), - &pi->graphics_voltage_change_enable, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_set_dpm_interval(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->graphics_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), - &pi->graphics_interval, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_set_dpm_boot_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), - &pi->graphics_boot_level, - sizeof(u8), pi->sram_end); - - return ret; -} - -static void kv_program_vc(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); -} - -static void kv_clear_vc(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); -} - -static int kv_set_divider_value(struct amdgpu_device *adev, - u32 index, u32 sclk) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct atom_clock_dividers dividers; - int ret; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - sclk, false, &dividers); - if (ret) - return ret; - - pi->graphics_level[index].SclkDid = (u8)dividers.post_div; - pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); - - return 0; -} - -static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, - u16 voltage) -{ - return 6200 - (voltage * 25); -} - -static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, - u32 vid_2bit) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 vid_8bit = kv_convert_vid2_to_vid7(adev, - &pi->sys_info.vid_mapping_table, - vid_2bit); - - return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); -} - - -static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; - pi->graphics_level[index].MinVddNb = - cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); - - return 0; -} - -static int kv_set_at(struct amdgpu_device *adev, u32
index, u32 at) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].AT = cpu_to_be16((u16)at); - - return 0; -} - -static void kv_dpm_power_level_enable(struct amdgpu_device *adev, - u32 index, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; -} - -static void kv_start_dpm(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); - - tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - - amdgpu_kv_smc_dpm_enable(adev, true); -} - -static void kv_stop_dpm(struct amdgpu_device *adev) -{ - amdgpu_kv_smc_dpm_enable(adev, false); -} - -static void kv_start_am(struct amdgpu_device *adev) -{ - u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - - sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | - SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; - - WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); -} - -static void kv_reset_am(struct amdgpu_device *adev) -{ - u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - - sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | - SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - - WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); -} - -static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) -{ - return amdgpu_kv_notify_message_to_smu(adev, freeze ? - PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); -} - -static int kv_force_lowest_valid(struct amdgpu_device *adev) -{ - return kv_force_dpm_lowest(adev); -} - -static int kv_unforce_levels(struct amdgpu_device *adev) -{ - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); - else - return kv_set_enabled_levels(adev); -} - -static int kv_update_sclk_t(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 low_sclk_interrupt_t = 0; - int ret = 0; - - if (pi->caps_sclk_throttle_low_notification) { - low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), - (u8 *)&low_sclk_interrupt_t, - sizeof(u32), pi->sram_end); - } - return ret; -} - -static int kv_program_bootup_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].clk == pi->boot_pl.sclk) - break; - } - - pi->graphics_boot_level = (u8)i; - kv_dpm_power_level_enable(adev, i, true); - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - if (table->num_max_dpm_entries == 0) - return -EINVAL; - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) - break; - } - - pi->graphics_boot_level = (u8)i; - kv_dpm_power_level_enable(adev, i, true); - } - return 0; -} - -static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->graphics_therm_throttle_enable = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), - 
&pi->graphics_therm_throttle_enable, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_upload_dpm_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), - (u8 *)&pi->graphics_level, - sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, - pi->sram_end); - - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), - &pi->graphics_dpm_level_count, - sizeof(u8), pi->sram_end); - - return ret; -} - -static u32 kv_get_clock_difference(u32 a, u32 b) -{ - return (a >= b) ? a - b : b - a; -} - -static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 value; - - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(clk, 40000) < 200) - value = 3; - else if (kv_get_clock_difference(clk, 30000) < 200) - value = 2; - else if (kv_get_clock_difference(clk, 20000) < 200) - value = 7; - else if (kv_get_clock_difference(clk, 15000) < 200) - value = 6; - else if (kv_get_clock_difference(clk, 10000) < 200) - value = 8; - else - value = 0; - } else { - value = 0; - } - - return value; -} - -static int kv_populate_uvd_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->uvd_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < table->entries[i].v)) - break; - - pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); - pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); - pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); - - pi->uvd_level[i].VClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); - pi->uvd_level[i].DClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].vclk, false, &dividers); - if (ret) - return ret; - pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].dclk, false, &dividers); - if (ret) - return ret; - pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; - - pi->uvd_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), - (u8 *)&pi->uvd_level_count, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - pi->uvd_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UVDInterval), - &pi->uvd_interval, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdLevel), - (u8 *)&pi->uvd_level, - sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, - pi->sram_end); - - return ret; - -} - -static int kv_populate_vce_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - u32 i; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; -
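
The populate-table helpers above and below all follow the same upload idiom: build a level array in CPU memory, convert multi-byte fields to big-endian for the SMU, then copy each field into SMC SRAM at dpm_table_start plus the field's offsetof() within the shared table layout. A standalone sketch, where fusion_dpm_table, smc_sram[] and smc_copy() are hypothetical stand-ins for the real SMU7 structures and amdgpu_kv_copy_bytes_to_smc() (the byte swap assumes a little-endian host, and the real firmware structs are packed):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fusion_dpm_table {	/* trimmed stand-in, not the real layout */
	uint8_t  GraphicsBootLevel;
	uint8_t  UvdLevelCount;
	uint32_t UvdVclk[8];	/* stored big-endian on the SMU side */
};

static uint8_t smc_sram[4096];	/* stands in for SMC SRAM */

static int smc_copy(size_t smc_addr, const void *src, size_t n)
{
	if (smc_addr + n > sizeof(smc_sram))
		return -1;	/* the driver checks against pi->sram_end */
	memcpy(&smc_sram[smc_addr], src, n);
	return 0;
}

static uint32_t to_be32(uint32_t v)	/* cpu_to_be32() on little-endian */
{
	return ((v & 0xff000000u) >> 24) | ((v & 0x00ff0000u) >> 8) |
	       ((v & 0x0000ff00u) << 8)  | ((v & 0x000000ffu) << 24);
}

int main(void)
{
	size_t table_start = 0x100;	/* would come from the firmware header */
	uint8_t count = 1;
	uint32_t vclk = to_be32(40000);

	smc_copy(table_start + offsetof(struct fusion_dpm_table, UvdLevelCount),
		 &count, sizeof(count));
	smc_copy(table_start + offsetof(struct fusion_dpm_table, UvdVclk),
		 &vclk, sizeof(vclk));
	printf("count byte: %u\n", (unsigned)smc_sram[table_start + 1]);
	return 0;
}
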
struct atom_clock_dividers dividers; - - if (table == NULL || table->count == 0) - return 0; - - pi->vce_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < table->entries[i].v) - break; - - pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); - pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - pi->vce_level[i].ClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].evclk, false, &dividers); - if (ret) - return ret; - pi->vce_level[i].Divider = (u8)dividers.post_div; - - pi->vce_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceLevelCount), - (u8 *)&pi->vce_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->vce_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VCEInterval), - (u8 *)&pi->vce_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceLevel), - (u8 *)&pi->vce_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, - pi->sram_end); - - return ret; -} - -static int kv_populate_samu_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->samu_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < table->entries[i].v) - break; - - pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); - pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - pi->samu_level[i].ClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].clk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].clk, false, &dividers); - if (ret) - return ret; - pi->samu_level[i].Divider = (u8)dividers.post_div; - - pi->samu_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), - (u8 *)&pi->samu_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->samu_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SAMUInterval), - (u8 *)&pi->samu_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuLevel), - (u8 *)&pi->samu_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, - pi->sram_end); - if (ret) - return ret; - - return ret; -} - - -static int kv_populate_acp_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->acp_level_count = 0; - for (i = 0; i < table->count; i++) { - pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); - pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - ret =
amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].clk, false, &dividers); - if (ret) - return ret; - pi->acp_level[i].Divider = (u8)dividers.post_div; - - pi->acp_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), - (u8 *)&pi->acp_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->acp_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, ACPInterval), - (u8 *)&pi->acp_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, AcpLevel), - (u8 *)&pi->acp_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, - pi->sram_end); - if (ret) - return ret; - - return ret; -} - -static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) - pi->graphics_level[i].ClkBypassCntl = 3; - else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) - pi->graphics_level[i].ClkBypassCntl = 2; - else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) - pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) - pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) - pi->graphics_level[i].ClkBypassCntl = 8; - else - pi->graphics_level[i].ClkBypassCntl = 0; - } else { - pi->graphics_level[i].ClkBypassCntl = 0; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) - pi->graphics_level[i].ClkBypassCntl = 3; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) - pi->graphics_level[i].ClkBypassCntl = 2; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) - pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) - pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) - pi->graphics_level[i].ClkBypassCntl = 8; - else - pi->graphics_level[i].ClkBypassCntl = 0; - } else { - pi->graphics_level[i].ClkBypassCntl = 0; - } - } - } -} - -static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ?
- PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); -} - -static void kv_reset_acp_boot_level(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->acp_boot_level = 0xff; -} - -static void kv_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct kv_ps *new_ps = kv_get_ps(rps); - struct kv_power_info *pi = kv_get_pi(adev); - - pi->current_rps = *rps; - pi->current_ps = *new_ps; - pi->current_rps.ps_priv = &pi->current_ps; - adev->pm.dpm.current_ps = &pi->current_rps; -} - -static void kv_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct kv_ps *new_ps = kv_get_ps(rps); - struct kv_power_info *pi = kv_get_pi(adev); - - pi->requested_rps = *rps; - pi->requested_ps = *new_ps; - pi->requested_rps.ps_priv = &pi->requested_ps; - adev->pm.dpm.requested_ps = &pi->requested_rps; -} - -static void kv_dpm_enable_bapm(void *handle, bool enable) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - if (pi->bapm_enable) { - ret = amdgpu_kv_smc_bapm_enable(adev, enable); - if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - } -} - -static int kv_dpm_enable(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = kv_process_firmware_header(adev); - if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); - return ret; - } - kv_init_fps_limits(adev); - kv_init_graphics_levels(adev); - ret = kv_program_bootup_state(adev); - if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); - return ret; - } - kv_calculate_dfs_bypass_settings(adev); - ret = kv_upload_dpm_settings(adev); - if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); - return ret; - } - ret = kv_populate_uvd_table(adev); - if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); - return ret; - } - ret = kv_populate_vce_table(adev); - if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); - return ret; - } - ret = kv_populate_samu_table(adev); - if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); - return ret; - } - ret = kv_populate_acp_table(adev); - if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); - return ret; - } - kv_program_vc(adev); -#if 0 - kv_initialize_hardware_cac_manager(adev); -#endif - kv_start_am(adev); - if (pi->enable_auto_thermal_throttling) { - ret = kv_enable_auto_thermal_throttling(adev); - if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); - return ret; - } - } - ret = kv_enable_dpm_voltage_scaling(adev); - if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); - return ret; - } - ret = kv_set_dpm_interval(adev); - if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); - return ret; - } - ret = kv_set_dpm_boot_state(adev); - if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); - return ret; - } - ret = kv_enable_ulv(adev, true); - if (ret) { - DRM_ERROR("kv_enable_ulv failed\n"); - return ret; - } - kv_start_dpm(adev); - ret = kv_enable_didt(adev, true); - if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); - return ret; - } - ret = kv_enable_smc_cac(adev, true); - if (ret) { - DRM_ERROR("kv_enable_smc_cac failed\n"); - return ret; - } - - kv_reset_acp_boot_level(adev); - - ret = amdgpu_kv_smc_bapm_enable(adev, false); - if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - return ret; - } - - if (adev->irq.installed && - amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { - ret = kv_set_thermal_temperature_range(adev, 
KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); - if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); - return ret; - } - amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - } - - return ret; -} - -static void kv_dpm_disable(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - - amdgpu_kv_smc_bapm_enable(adev, false); - - if (adev->asic_type == CHIP_MULLINS) - kv_enable_nb_dpm(adev, false); - - /* powerup blocks */ - kv_dpm_powergate_acp(adev, false); - kv_dpm_powergate_samu(adev, false); - if (pi->caps_vce_pg) /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - if (pi->caps_uvd_pg) /* power on the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); - - kv_enable_smc_cac(adev, false); - kv_enable_didt(adev, false); - kv_clear_vc(adev); - kv_stop_dpm(adev); - kv_enable_ulv(adev, false); - kv_reset_am(adev); - - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); -} - -#if 0 -static int kv_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, - (u8 *)&value, sizeof(u16), pi->sram_end); -} - -static int kv_read_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 *value) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, - value, pi->sram_end); -} -#endif - -static void kv_init_sclk_t(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->low_sclk_interrupt_t = 0; -} - -static int kv_init_fps_limits(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (pi->caps_fps) { - u16 tmp; - - tmp = 45; - pi->fps_high_t = cpu_to_be16(tmp); - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, FpsHighT), - (u8 *)&pi->fps_high_t, - sizeof(u16), pi->sram_end); - - tmp = 30; - pi->fps_low_t = cpu_to_be16(tmp); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, FpsLowT), - (u8 *)&pi->fps_low_t, - sizeof(u16), pi->sram_end); - - } - return ret; -} - -static void kv_init_powergate_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->uvd_power_gated = false; - pi->vce_power_gated = false; - pi->samu_power_gated = false; - pi->acp_power_gated = false; - -} - -static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); -} - -static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); -} - -static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); -} - -static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? 
- PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); -} - -static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - int ret; - u32 mask; - - if (!gate) { - if (table->count) - pi->uvd_boot_level = table->count - 1; - else - pi->uvd_boot_level = 0; - - if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { - mask = 1 << pi->uvd_boot_level; - } else { - mask = 0x1f; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), - (uint8_t *)&pi->uvd_boot_level, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - mask); - } - - return kv_enable_uvd_dpm(adev, !gate); -} - -static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) -{ - u8 i; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].evclk >= evclk) - break; - } - - return i; -} - -static int kv_update_vce_dpm(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - int ret; - - if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - if (pi->caps_stable_p_state) - pi->vce_boot_level = table->count - 1; - else - pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceBootLevel), - (u8 *)&pi->vce_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (1 << pi->vce_boot_level)); - kv_enable_vce_dpm(adev, true); - } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { - kv_enable_vce_dpm(adev, false); - } - - return 0; -} - -static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - int ret; - - if (!gate) { - if (pi->caps_stable_p_state) - pi->samu_boot_level = table->count - 1; - else - pi->samu_boot_level = 0; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), - (u8 *)&pi->samu_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (1 << pi->samu_boot_level)); - } - - return kv_enable_samu_dpm(adev, !gate); -} - -static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) -{ - u8 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].clk >= 0) /* XXX */ - break; - } - - if (i >= table->count) - i = table->count - 1; - - return i; -} - -static void kv_update_acp_boot_level(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u8 
acp_boot_level; - - if (!pi->caps_stable_p_state) { - acp_boot_level = kv_get_acp_boot_level(adev); - if (acp_boot_level != pi->acp_boot_level) { - pi->acp_boot_level = acp_boot_level; - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (1 << pi->acp_boot_level)); - } - } -} - -static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - int ret; - - if (!gate) { - if (pi->caps_stable_p_state) - pi->acp_boot_level = table->count - 1; - else - pi->acp_boot_level = kv_get_acp_boot_level(adev); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), - (u8 *)&pi->acp_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (1 << pi->acp_boot_level)); - } - - return kv_enable_acp_dpm(adev, !gate); -} - -static void kv_dpm_powergate_uvd(void *handle, bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->uvd_power_gated = gate; - - if (gate) { - /* stop the UVD block */ - ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - kv_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) - /* power off the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); - } else { - if (pi->caps_uvd_pg) - /* power on the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); - /* re-init the UVD block */ - kv_update_uvd_dpm(adev, gate); - - ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - } -} - -static void kv_dpm_powergate_vce(void *handle, bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->vce_power_gated = gate; - - if (gate) { - /* stop the VCE block */ - ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - kv_enable_vce_dpm(adev, false); - if (pi->caps_vce_pg) /* power off the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - } else { - if (pi->caps_vce_pg) /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - kv_enable_vce_dpm(adev, true); - /* re-init the VCE block */ - ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - } -} - - -static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->samu_power_gated == gate) - return; - - pi->samu_power_gated = gate; - - if (gate) { - kv_update_samu_dpm(adev, true); - if (pi->caps_samu_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); - } else { - if (pi->caps_samu_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); - kv_update_samu_dpm(adev, false); - } -} - -static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->acp_power_gated == gate) - return; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return; - - pi->acp_power_gated = gate; - - if (gate) { - kv_update_acp_dpm(adev, true); - if 
(pi->caps_acp_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); - } else { - if (pi->caps_acp_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); - kv_update_acp_dpm(adev, false); - } -} - -static void kv_set_valid_clock_range(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if ((table->entries[i].clk >= new_ps->levels[0].sclk) || - (i == (pi->graphics_dpm_level_count - 1))) { - pi->lowest_valid = i; - break; - } - } - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) - break; - } - pi->highest_valid = i; - - if (pi->lowest_valid > pi->highest_valid) { - if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > - (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) - pi->highest_valid = pi->lowest_valid; - else - pi->lowest_valid = pi->highest_valid; - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { - if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || - i == (int)(pi->graphics_dpm_level_count - 1)) { - pi->lowest_valid = i; - break; - } - } - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].sclk_frequency <= - new_ps->levels[new_ps->num_levels - 1].sclk) - break; - } - pi->highest_valid = i; - - if (pi->lowest_valid > pi->highest_valid) { - if ((new_ps->levels[0].sclk - - table->entries[pi->highest_valid].sclk_frequency) > - (table->entries[pi->lowest_valid].sclk_frequency - - new_ps->levels[new_ps->num_levels -1].sclk)) - pi->highest_valid = pi->lowest_valid; - else - pi->lowest_valid = pi->highest_valid; - } - } -} - -static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - u8 clk_bypass_cntl; - - if (pi->caps_enable_dfs_bypass) { - clk_bypass_cntl = new_ps->need_dfs_bypass ? 
- pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; - ret = amdgpu_kv_copy_bytes_to_smc(adev, - (pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + - (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + - offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), - &clk_bypass_cntl, - sizeof(u8), pi->sram_end); - } - - return ret; -} - -static int kv_enable_nb_dpm(struct amdgpu_device *adev, - bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (enable) { - if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); - if (ret == 0) - pi->nb_dpm_enabled = true; - } - } else { - if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); - if (ret == 0) - pi->nb_dpm_enabled = false; - } - } - - return ret; -} - -static int kv_dpm_force_performance_level(void *handle, - enum amd_dpm_forced_level level) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (level == AMD_DPM_FORCED_LEVEL_HIGH) { - ret = kv_force_dpm_highest(adev); - if (ret) - return ret; - } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { - ret = kv_force_dpm_lowest(adev); - if (ret) - return ret; - } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { - ret = kv_unforce_levels(adev); - if (ret) - return ret; - } - - adev->pm.dpm.forced_level = level; - - return 0; -} - -static int kv_dpm_pre_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - kv_update_requested_ps(adev, new_ps); - - kv_apply_state_adjust_rules(adev, - &pi->requested_rps, - &pi->current_rps); - - return 0; -} - -static int kv_dpm_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - struct amdgpu_ps *old_ps = &pi->current_rps; - int ret; - - if (pi->bapm_enable) { - ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); - if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - return ret; - } - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - if (pi->enable_dpm) { - kv_set_valid_clock_range(adev, new_ps); - kv_update_dfs_bypass_settings(adev, new_ps); - ret = kv_calculate_ds_divider(adev); - if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); - return ret; - } - kv_calculate_nbps_level_settings(adev); - kv_calculate_dpm_settings(adev); - kv_force_lowest_valid(adev); - kv_enable_new_levels(adev); - kv_upload_dpm_settings(adev); - kv_program_nbps_index_settings(adev, new_ps); - kv_unforce_levels(adev); - kv_set_enabled_levels(adev); - kv_force_lowest_valid(adev); - kv_unforce_levels(adev); - - ret = kv_update_vce_dpm(adev, new_ps, old_ps); - if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); - return ret; - } - kv_update_sclk_t(adev); - if (adev->asic_type == CHIP_MULLINS) - kv_enable_nb_dpm(adev, true); - } - } else { - if (pi->enable_dpm) { - kv_set_valid_clock_range(adev, new_ps); - kv_update_dfs_bypass_settings(adev, new_ps); - ret = kv_calculate_ds_divider(adev); - if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); - return ret; - } - kv_calculate_nbps_level_settings(adev); - kv_calculate_dpm_settings(adev); - kv_freeze_sclk_dpm(adev, true); - 
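
Note the ordering on the non-Kabini/Mullins path that continues below: SCLK DPM is frozen before the level table is rewritten and unfrozen only afterwards, so the SMU never samples a half-updated table. A minimal sketch of that sequence, with illustrative message IDs and helper names standing in for PPSMC_MSG_SCLKDPM_FreezeLevel/UnfreezeLevel and kv_upload_dpm_settings():

#include <stdio.h>

enum smu_msg { MSG_FREEZE, MSG_UNFREEZE };

static int notify_smu(enum smu_msg m)
{
	printf("smu msg %d\n", m);
	return 0;	/* the driver polls a response register here */
}

static int upload_levels(void)
{
	/* rewrite the level table in SMC SRAM while it is not consumed */
	return 0;
}

static int set_power_state(void)
{
	int ret;

	ret = notify_smu(MSG_FREEZE);		/* stop level switching */
	if (ret)
		return ret;
	ret = upload_levels();			/* safe: table not being sampled */
	if (ret) {
		notify_smu(MSG_UNFREEZE);	/* best effort on the error path */
		return ret;
	}
	return notify_smu(MSG_UNFREEZE);	/* resume DPM on the new table */
}

int main(void)
{
	return set_power_state();
}
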
kv_upload_dpm_settings(adev); - kv_program_nbps_index_settings(adev, new_ps); - kv_freeze_sclk_dpm(adev, false); - kv_set_enabled_levels(adev); - ret = kv_update_vce_dpm(adev, new_ps, old_ps); - if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); - return ret; - } - kv_update_acp_boot_level(adev); - kv_update_sclk_t(adev); - kv_enable_nb_dpm(adev, true); - } - } - - return 0; -} - -static void kv_dpm_post_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - - kv_update_current_ps(adev, new_ps); -} - -static void kv_dpm_setup_asic(struct amdgpu_device *adev) -{ - sumo_take_smu_control(adev, true); - kv_init_powergate_state(adev); - kv_init_sclk_t(adev); -} - -#if 0 -static void kv_dpm_reset_asic(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - kv_force_lowest_valid(adev); - kv_init_graphics_levels(adev); - kv_program_bootup_state(adev); - kv_upload_dpm_settings(adev); - kv_force_lowest_valid(adev); - kv_unforce_levels(adev); - } else { - kv_init_graphics_levels(adev); - kv_program_bootup_state(adev); - kv_freeze_sclk_dpm(adev, true); - kv_upload_dpm_settings(adev); - kv_freeze_sclk_dpm(adev, false); - kv_set_enabled_level(adev, pi->graphics_boot_level); - } -} -#endif - -static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, - struct amdgpu_clock_and_voltage_limits *table) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { - int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; - table->sclk = - pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; - table->vddc = - kv_convert_2bit_index_to_voltage(adev, - pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); - } - - table->mclk = pi->sys_info.nbp_memory_clock[0]; -} - -static void kv_patch_voltage_values(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *samu_table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (uvd_table->count) { - for (i = 0; i < uvd_table->count; i++) - uvd_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - uvd_table->entries[i].v); - } - - if (vce_table->count) { - for (i = 0; i < vce_table->count; i++) - vce_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - vce_table->entries[i].v); - } - - if (samu_table->count) { - for (i = 0; i < samu_table->count; i++) - samu_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - samu_table->entries[i].v); - } - - if (acp_table->count) { - for (i = 0; i < acp_table->count; i++) - acp_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - acp_table->entries[i].v); - } - -} - -static void kv_construct_boot_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->boot_pl.sclk = pi->sys_info.bootup_sclk; - pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; - 
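
Worked example for the kv_convert_8bit_index_to_voltage() arithmetic used when patching the clock-voltage tables above: 6200 - 25 * vid is in 0.25 mV units, i.e. 1.55 V at VID 0 falling 6.25 mV per step; the unit interpretation is an inference (consistent with an SVI2-style encoding), not stated in the code:

#include <stdio.h>

static unsigned int vid_to_voltage(unsigned int vid)	/* 0.25 mV units */
{
	return 6200 - vid * 25;	/* same arithmetic as the driver */
}

int main(void)
{
	unsigned int vid;

	for (vid = 0; vid <= 3; vid++) {
		unsigned int v = vid_to_voltage(vid);

		printf("VID %u -> %u.%02u mV\n", vid, v / 4, (v % 4) * 25);
	}
	return 0;	/* VID 0 -> 1550.00 mV, VID 1 -> 1543.75 mV, ... */
}
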
pi->boot_pl.ds_divider_index = 0; - pi->boot_pl.ss_divider_index = 0; - pi->boot_pl.allow_gnb_slow = 1; - pi->boot_pl.force_nbp_state = 0; - pi->boot_pl.display_wm = 0; - pi->boot_pl.vce_wm = 0; -} - -static int kv_force_dpm_highest(struct amdgpu_device *adev) -{ - int ret; - u32 enable_mask, i; - - ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); - if (ret) - return ret; - - for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { - if (enable_mask & (1 << i)) - break; - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); - else - return kv_set_enabled_level(adev, i); -} - -static int kv_force_dpm_lowest(struct amdgpu_device *adev) -{ - int ret; - u32 enable_mask, i; - - ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); - if (ret) - return ret; - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { - if (enable_mask & (1 << i)) - break; - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); - else - return kv_set_enabled_level(adev, i); -} - -static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, - u32 sclk, u32 min_sclk_in_sr) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - u32 temp; - u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); - - if (sclk < min) - return 0; - - if (!pi->caps_sclk_ds) - return 0; - - for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { - temp = sclk >> i; - if (temp >= min) - break; - } - - return (u8)i; -} - -static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - int i; - - if (table && table->count) { - for (i = table->count - 1; i >= 0; i--) { - if (pi->high_voltage_t && - (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= - pi->high_voltage_t)) { - *limit = i; - return 0; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { - if (pi->high_voltage_t && - (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= - pi->high_voltage_t)) { - *limit = i; - return 0; - } - } - } - - *limit = 0; - return 0; -} - -static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps) -{ - struct kv_ps *ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 min_sclk = 10000; /* ??? 
*/ - u32 sclk, mclk = 0; - int i, limit; - bool force_high; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 stable_p_state_sclk = 0; - struct amdgpu_clock_and_voltage_limits *max_limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - - if (new_rps->vce_active) { - new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; - new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; - } else { - new_rps->evclk = 0; - new_rps->ecclk = 0; - } - - mclk = max_limits->mclk; - sclk = min_sclk; - - if (pi->caps_stable_p_state) { - stable_p_state_sclk = (max_limits->sclk * 75) / 100; - - for (i = table->count - 1; i >= 0; i--) { - if (stable_p_state_sclk >= table->entries[i].clk) { - stable_p_state_sclk = table->entries[i].clk; - break; - } - } - - if (i > 0) - stable_p_state_sclk = table->entries[0].clk; - - sclk = stable_p_state_sclk; - } - - if (new_rps->vce_active) { - if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) - sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; - } - - ps->need_dfs_bypass = true; - - for (i = 0; i < ps->num_levels; i++) { - if (ps->levels[i].sclk < sclk) - ps->levels[i].sclk = sclk; - } - - if (table && table->count) { - for (i = 0; i < ps->num_levels; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { - kv_get_high_voltage_limit(adev, &limit); - ps->levels[i].sclk = table->entries[limit].clk; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = 0; i < ps->num_levels; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { - kv_get_high_voltage_limit(adev, &limit); - ps->levels[i].sclk = table->entries[limit].sclk_frequency; - } - } - } - - if (pi->caps_stable_p_state) { - for (i = 0; i < ps->num_levels; i++) { - ps->levels[i].sclk = stable_p_state_sclk; - } - } - - pi->video_start = new_rps->dclk || new_rps->vclk || - new_rps->evclk || new_rps->ecclk; - - if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) - pi->battery_state = true; - else - pi->battery_state = false; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - ps->dpm0_pg_nb_ps_lo = 0x1; - ps->dpm0_pg_nb_ps_hi = 0x0; - ps->dpmx_nb_ps_lo = 0x1; - ps->dpmx_nb_ps_hi = 0x0; - } else { - ps->dpm0_pg_nb_ps_lo = 0x3; - ps->dpm0_pg_nb_ps_hi = 0x0; - ps->dpmx_nb_ps_lo = 0x3; - ps->dpmx_nb_ps_hi = 0x0; - - if (pi->sys_info.nb_dpm_enable) { - force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || - pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || - pi->disable_nb_ps3_in_battery; - ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; - ps->dpm0_pg_nb_ps_hi = 0x2; - ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; - ps->dpmx_nb_ps_hi = 0x2; - } - } -} - -static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, - u32 index, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; -} - -static int kv_calculate_ds_divider(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 sclk_in_sr = 10000; /* ??? 
*/ - u32 i; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].DeepSleepDivId = - kv_get_sleep_divider_id_from_clock(adev, - be32_to_cpu(pi->graphics_level[i].SclkFrequency), - sclk_in_sr); - } - return 0; -} - -static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - bool force_high; - struct amdgpu_clock_and_voltage_limits *max_limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - u32 mclk = max_limits->mclk; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].GnbSlow = 1; - pi->graphics_level[i].ForceNbPs1 = 0; - pi->graphics_level[i].UpH = 0; - } - - if (!pi->sys_info.nb_dpm_enable) - return 0; - - force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || - (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); - - if (force_high) { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - pi->graphics_level[i].GnbSlow = 0; - } else { - if (pi->battery_state) - pi->graphics_level[0].ForceNbPs1 = 1; - - pi->graphics_level[1].GnbSlow = 0; - pi->graphics_level[2].GnbSlow = 0; - pi->graphics_level[3].GnbSlow = 0; - pi->graphics_level[4].GnbSlow = 0; - } - } else { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].GnbSlow = 1; - pi->graphics_level[i].ForceNbPs1 = 0; - pi->graphics_level[i].UpH = 0; - } - - if (pi->sys_info.nb_dpm_enable && pi->battery_state) { - pi->graphics_level[pi->lowest_valid].UpH = 0x28; - pi->graphics_level[pi->lowest_valid].GnbSlow = 0; - if (pi->lowest_valid != pi->highest_valid) - pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; - } - } - return 0; -} - -static int kv_calculate_dpm_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 
1 : 0; - - return 0; -} - -static void kv_init_graphics_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - u32 vid_2bit; - - pi->graphics_dpm_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) - break; - - kv_set_divider_value(adev, i, table->entries[i].clk); - vid_2bit = kv_convert_vid7_to_vid2(adev, - &pi->sys_info.vid_mapping_table, - table->entries[i].v); - kv_set_vid(adev, i, vid_2bit); - kv_set_at(adev, i, pi->at[i]); - kv_dpm_power_level_enabled_for_throttle(adev, i, true); - pi->graphics_dpm_level_count++; - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - pi->graphics_dpm_level_count = 0; - for (i = 0; i < table->num_max_dpm_entries; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < - kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) - break; - - kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); - kv_set_vid(adev, i, table->entries[i].vid_2bit); - kv_set_at(adev, i, pi->at[i]); - kv_dpm_power_level_enabled_for_throttle(adev, i, true); - pi->graphics_dpm_level_count++; - } - } - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) - kv_dpm_power_level_enable(adev, i, false); -} - -static void kv_enable_new_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { - if (i >= pi->lowest_valid && i <= pi->highest_valid) - kv_dpm_power_level_enable(adev, i, true); - } -} - -static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) -{ - u32 new_mask = (1 << level); - - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - new_mask); -} - -static int kv_set_enabled_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i, new_mask = 0; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - new_mask |= (1 << i); - - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - new_mask); -} - -static void kv_program_nbps_index_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 nbdpmconfig1; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return; - - if (pi->sys_info.nb_dpm_enable) { - nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); - nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | - NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | - NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | - NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); - nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | - (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | - (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | - (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); - WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); - } -} - -static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, - int min_temp, int max_temp) -{ - int low_temp = 0 * 1000; - int high_temp = 255 * 1000; - u32 tmp; - - if (low_temp < min_temp) - low_temp = min_temp; - if (high_temp > max_temp) - high_temp = max_temp; - if (high_temp < low_temp) { - 
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); - return -EINVAL; - } - - tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | - CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); - tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | - ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); - WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); - - adev->pm.dpm.thermal.min_temp = low_temp; - adev->pm.dpm.thermal.max_temp = high_temp; - - return 0; -} - -union igp_info { - struct _ATOM_INTEGRATED_SYSTEM_INFO info; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; -}; - -static int kv_parse_sys_info_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - union igp_info *igp_info; - u8 frev, crev; - u16 data_offset; - int i; - - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - igp_info = (union igp_info *)(mode_info->atom_context->bios + - data_offset); - - if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); - return -EINVAL; - } - pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); - pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); - pi->sys_info.bootup_nb_voltage_index = - le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); - if (igp_info->info_8.ucHtcTmpLmt == 0) - pi->sys_info.htc_tmp_lmt = 203; - else - pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; - if (igp_info->info_8.ucHtcHystLmt == 0) - pi->sys_info.htc_hyst_lmt = 5; - else - pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; - if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); - } - - if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) - pi->sys_info.nb_dpm_enable = true; - else - pi->sys_info.nb_dpm_enable = false; - - for (i = 0; i < KV_NUM_NBPSTATES; i++) { - pi->sys_info.nbp_memory_clock[i] = - le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); - pi->sys_info.nbp_n_clock[i] = - le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); - } - if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) - pi->caps_enable_dfs_bypass = true; - - sumo_construct_sclk_voltage_mapping_table(adev, - &pi->sys_info.sclk_voltage_mapping_table, - igp_info->info_8.sAvail_SCLK); - - sumo_construct_vid_mapping_table(adev, - &pi->sys_info.vid_mapping_table, - igp_info->info_8.sAvail_SCLK); - - kv_construct_max_power_limits_table(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); - } - return 0; -} - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; -}; - -union pplib_clock_info { - struct _ATOM_PPLIB_R600_CLOCK_INFO r600; - struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; -}; - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - 
struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static void kv_patch_boot_state(struct amdgpu_device *adev, - struct kv_ps *ps) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - ps->num_levels = 1; - ps->levels[0] = pi->boot_pl; -} - -static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - struct kv_ps *ps = kv_get_ps(rps); - - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - adev->pm.dpm.boot_ps = rps; - kv_patch_boot_state(adev, ps); - } - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; -} - -static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct kv_ps *ps = kv_get_ps(rps); - struct kv_pl *pl = &ps->levels[index]; - u32 sclk; - - sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); - sclk |= clock_info->sumo.ucEngineClockHigh << 16; - pl->sclk = sclk; - pl->vddc_index = clock_info->sumo.vddcIndex; - - ps->num_levels = index + 1; - - if (pi->caps_sclk_ds) { - pl->ds_divider_index = 5; - pl->ss_divider_index = 5; - } -} - -static int kv_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct kv_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - amdgpu_add_thermal_controller(adev); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, - sizeof(struct amdgpu_ps), - GFP_KERNEL); - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); - return -ENOMEM; 
- } - adev->pm.dpm.ps[i].ps_priv = ps; - k = 0; - idx = (u8 *)&power_state->v2.clockInfoIndex[0]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = idx[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - ((u8 *)&clock_info_array->clockInfo[0] + - (clock_array_index * clock_info_array->ucEntrySize)); - kv_parse_pplib_clock_info(adev, - &adev->pm.dpm.ps[i], k, - clock_info); - k++; - } - kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - u32 sclk; - clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; - sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); - sclk |= clock_info->sumo.ucEngineClockHigh << 16; - adev->pm.dpm.vce_states[i].sclk = sclk; - adev->pm.dpm.vce_states[i].mclk = 0; - } - - return 0; -} - -static int kv_dpm_init(struct amdgpu_device *adev) -{ - struct kv_power_info *pi; - int ret, i; - - pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); - if (pi == NULL) - return -ENOMEM; - adev->pm.dpm.priv = pi; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - return ret; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - return ret; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) - pi->at[i] = TRINITY_AT_DFLT; - - pi->sram_end = SMC_RAM_END; - - pi->enable_nb_dpm = true; - - pi->caps_power_containment = true; - pi->caps_cac = true; - pi->enable_didt = false; - if (pi->enable_didt) { - pi->caps_sq_ramping = true; - pi->caps_db_ramping = true; - pi->caps_td_ramping = true; - pi->caps_tcp_ramping = true; - } - - if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) - pi->caps_sclk_ds = true; - else - pi->caps_sclk_ds = false; - - pi->enable_auto_thermal_throttling = true; - pi->disable_nb_ps3_in_battery = false; - if (amdgpu_bapm == 0) - pi->bapm_enable = false; - else - pi->bapm_enable = true; - pi->voltage_drop_t = 0; - pi->caps_sclk_throttle_low_notification = false; - pi->caps_fps = false; /* true? */ - pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; - pi->caps_uvd_dpm = true; - pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; - pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; - pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; - pi->caps_stable_p_state = false; - - ret = kv_parse_sys_info_table(adev); - if (ret) - return ret; - - kv_patch_voltage_values(adev); - kv_construct_boot_state(adev); - - ret = kv_parse_power_table(adev); - if (ret) - return ret; - - pi->enable_dpm = true; - - return 0; -} - -static void -kv_dpm_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - u32 current_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - u32 sclk, tmp; - u16 vddc; - - if (current_index >= SMU__NUM_SCLK_DPM_STATE) { - seq_printf(m, "invalid dpm profile %d\n", current_index); - } else { - sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); - tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; - vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); - seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); - seq_printf(m, "power level %d sclk: %u vddc: %u\n", - current_index, sclk, vddc); - } -} - -static void -kv_dpm_print_power_state(void *handle, void *request_ps) -{ - int i; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct kv_ps *ps = kv_get_ps(rps); - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->num_levels; i++) { - struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); - } - amdgpu_dpm_print_ps_status(adev, rps); -} - -static void kv_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - kfree(adev->pm.dpm.ps[i].ps_priv); - } - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - amdgpu_free_extended_power_table(adev); -} - -static void kv_dpm_display_configuration_changed(void *handle) -{ - -} - -static u32 kv_dpm_get_sclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); - - if (low) - return requested_state->levels[0].sclk; - else - return requested_state->levels[requested_state->num_levels - 1].sclk; -} - -static u32 kv_dpm_get_mclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - - return pi->sys_info.bootup_uma_clk; -} - -/* get temperature in millidegrees */ -static int kv_dpm_get_temp(void *handle) -{ - u32 temp; - int actual_temp = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - temp = RREG32_SMC(0xC0300E0C); - - if (temp) - actual_temp = (temp / 8) - 49; - else - actual_temp = 0; - - actual_temp = actual_temp * 1000; - - return actual_temp; -} - -static int kv_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->powerplay.pp_funcs = &kv_dpm_funcs; - adev->powerplay.pp_handle = adev; - kv_dpm_set_irq_funcs(adev); - - return 0; -} - -static int 
kv_dpm_late_init(void *handle) -{ - /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - kv_dpm_powergate_acp(adev, true); - kv_dpm_powergate_samu(adev, true); - - return 0; -} - -static int kv_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); - ret = kv_dpm_init(adev); - if (ret) - goto dpm_failed; - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_failed: - kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - return ret; -} - -static int kv_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - flush_work(&adev->pm.dpm.thermal.work); - - mutex_lock(&adev->pm.mutex); - kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int kv_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!amdgpu_dpm) - return 0; - - mutex_lock(&adev->pm.mutex); - kv_dpm_setup_asic(adev); - ret = kv_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); - return ret; -} - -static int kv_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - kv_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } - - return 0; -} - -static int kv_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - /* disable dpm */ - kv_dpm_disable(adev); - /* reset the power state */ - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); - } - return 0; -} - -static int kv_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); - kv_dpm_setup_asic(adev); - ret = kv_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); - } - return 0; -} - -static bool kv_dpm_is_idle(void *handle) -{ - return true; -} - -static int kv_dpm_wait_for_idle(void *handle) -{ - return 
0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - -static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 cg_thermal_int; - - switch (type) { - case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - default: - break; - } - break; - - case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - default: - break; - } - break; - - default: - break; - } - return 0; -} - -static int kv_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - bool queue_thermal = false; - - if (entry == NULL) - return -EINVAL; - - switch (entry->src_id) { - case 230: /* thermal low to high */ - DRM_DEBUG("IH: thermal low to high\n"); - adev->pm.dpm.thermal.high_to_low = false; - queue_thermal = true; - break; - case 231: /* thermal high to low */ - DRM_DEBUG("IH: thermal high to low\n"); - adev->pm.dpm.thermal.high_to_low = true; - queue_thermal = true; - break; - default: - break; - } - - if (queue_thermal) - schedule_work(&adev->pm.dpm.thermal.work); - - return 0; -} - -static int kv_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int kv_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, - const struct kv_pl *kv_cpl2) -{ - return ((kv_cpl1->sclk == kv_cpl2->sclk) && - (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && - (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && - (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); -} - -static int kv_check_state_equal(void *handle, - void *current_ps, - void *request_ps, - bool *equal) -{ - struct kv_ps *kv_cps; - struct kv_ps *kv_rps; - int i; - struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) - return -EINVAL; - - kv_cps = kv_get_ps(cps); - kv_rps = kv_get_ps(rps); - - if (kv_cps == NULL) { - *equal = false; - return 0; - } - - if (kv_cps->num_levels != kv_rps->num_levels) { - *equal = false; - return 0; - } - - for (i = 0; i < kv_cps->num_levels; i++) { - if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), - &(kv_rps->levels[i]))) { - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); - *equal &= ((cps->evclk == rps->evclk) && 
(cps->ecclk == rps->ecclk)); - - return 0; -} - -static int kv_dpm_read_sensor(void *handle, int idx, - void *value, int *size) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - uint32_t sclk; - u32 pl_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - - /* size must be at least 4 bytes for all sensors */ - if (*size < 4) - return -EINVAL; - - switch (idx) { - case AMDGPU_PP_SENSOR_GFX_SCLK: - if (pl_index < SMU__NUM_SCLK_DPM_STATE) { - sclk = be32_to_cpu( - pi->graphics_level[pl_index].SclkFrequency); - *((uint32_t *)value) = sclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GPU_TEMP: - *((uint32_t *)value) = kv_dpm_get_temp(adev); - *size = 4; - return 0; - default: - return -EINVAL; - } -} - -static int kv_set_powergating_by_smu(void *handle, - uint32_t block_type, bool gate) -{ - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: - kv_dpm_powergate_uvd(handle, gate); - break; - case AMD_IP_BLOCK_TYPE_VCE: - kv_dpm_powergate_vce(handle, gate); - break; - default: - break; - } - return 0; -} - -static const struct amd_ip_funcs kv_dpm_ip_funcs = { - .name = "kv_dpm", - .early_init = kv_dpm_early_init, - .late_init = kv_dpm_late_init, - .sw_init = kv_dpm_sw_init, - .sw_fini = kv_dpm_sw_fini, - .hw_init = kv_dpm_hw_init, - .hw_fini = kv_dpm_hw_fini, - .suspend = kv_dpm_suspend, - .resume = kv_dpm_resume, - .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, - .set_clockgating_state = kv_dpm_set_clockgating_state, - .set_powergating_state = kv_dpm_set_powergating_state, -}; - -const struct amdgpu_ip_block_version kv_smu_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 1, - .minor = 0, - .rev = 0, - .funcs = &kv_dpm_ip_funcs, -}; - -static const struct amd_pm_funcs kv_dpm_funcs = { - .pre_set_power_state = &kv_dpm_pre_set_power_state, - .set_power_state = &kv_dpm_set_power_state, - .post_set_power_state = &kv_dpm_post_set_power_state, - .display_configuration_changed = &kv_dpm_display_configuration_changed, - .get_sclk = &kv_dpm_get_sclk, - .get_mclk = &kv_dpm_get_mclk, - .print_power_state = &kv_dpm_print_power_state, - .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, - .force_performance_level = &kv_dpm_force_performance_level, - .set_powergating_by_smu = kv_set_powergating_by_smu, - .enable_bapm = &kv_dpm_enable_bapm, - .get_vce_clock_state = amdgpu_get_vce_clock_state, - .check_state_equal = kv_check_state_equal, - .read_sensor = &kv_dpm_read_sensor, -}; - -static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { - .set = kv_dpm_set_interrupt_state, - .process = kv_dpm_process_interrupt, -}; - -static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) -{ - adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; - adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/amdgpu/kv_dpm.h deleted file mode 100644 index 6df0ed41317c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __KV_DPM_H__ -#define __KV_DPM_H__ - -#define SMU__NUM_SCLK_DPM_STATE 8 -#define SMU__NUM_MCLK_DPM_LEVELS 4 -#define SMU__NUM_LCLK_DPM_LEVELS 8 -#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */ -#include "smu7_fusion.h" -#include "ppsmc.h" - -#define SUMO_MAX_HARDWARE_POWERLEVELS 5 - -#define SUMO_MAX_NUMBER_VOLTAGES 4 - -struct sumo_vid_mapping_entry { - u16 vid_2bit; - u16 vid_7bit; -}; - -struct sumo_vid_mapping_table { - u32 num_entries; - struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES]; -}; - -struct sumo_sclk_voltage_mapping_entry { - u32 sclk_frequency; - u16 vid_2bit; - u16 rsv; -}; - -struct sumo_sclk_voltage_mapping_table { - u32 num_max_dpm_entries; - struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS]; -}; - -#define TRINITY_AT_DFLT 30 - -#define KV_NUM_NBPSTATES 4 - -enum kv_pt_config_reg_type { - KV_CONFIGREG_MMR = 0, - KV_CONFIGREG_SMC_IND, - KV_CONFIGREG_DIDT_IND, - KV_CONFIGREG_CACHE, - KV_CONFIGREG_MAX -}; - -struct kv_pt_config_reg { - u32 offset; - u32 mask; - u32 shift; - u32 value; - enum kv_pt_config_reg_type type; -}; - -struct kv_lcac_config_values { - u32 block_id; - u32 signal_id; - u32 t; -}; - -struct kv_lcac_config_reg { - u32 cntl; - u32 block_mask; - u32 block_shift; - u32 signal_mask; - u32 signal_shift; - u32 t_mask; - u32 t_shift; - u32 enable_mask; - u32 enable_shift; -}; - -struct kv_pl { - u32 sclk; - u8 vddc_index; - u8 ds_divider_index; - u8 ss_divider_index; - u8 allow_gnb_slow; - u8 force_nbp_state; - u8 display_wm; - u8 vce_wm; -}; - -struct kv_ps { - struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS]; - u32 num_levels; - bool need_dfs_bypass; - u8 dpm0_pg_nb_ps_lo; - u8 dpm0_pg_nb_ps_hi; - u8 dpmx_nb_ps_lo; - u8 dpmx_nb_ps_hi; -}; - -struct kv_sys_info { - u32 bootup_uma_clk; - u32 bootup_sclk; - u32 dentist_vco_freq; - u32 nb_dpm_enable; - u32 nbp_memory_clock[KV_NUM_NBPSTATES]; - u32 nbp_n_clock[KV_NUM_NBPSTATES]; - u16 bootup_nb_voltage_index; - u8 htc_tmp_lmt; - u8 htc_hyst_lmt; - struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table; - struct sumo_vid_mapping_table vid_mapping_table; - u32 uma_channel_number; -}; - -struct kv_power_info { - u32 at[SUMO_MAX_HARDWARE_POWERLEVELS]; - u32 voltage_drop_t; - struct kv_sys_info sys_info; - struct kv_pl boot_pl; - bool enable_nb_ps_policy; - bool disable_nb_ps3_in_battery; - bool video_start; - bool battery_state; - u32 lowest_valid; - u32 highest_valid; - 
u16 high_voltage_t; - bool cac_enabled; - bool bapm_enable; - /* smc offsets */ - u32 sram_end; - u32 dpm_table_start; - u32 soft_regs_start; - /* dpm SMU tables */ - u8 graphics_dpm_level_count; - u8 uvd_level_count; - u8 vce_level_count; - u8 acp_level_count; - u8 samu_level_count; - u16 fps_high_t; - SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE]; - SMU7_Fusion_ACPILevel acpi_level; - SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD]; - SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE]; - SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP]; - SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU]; - u8 uvd_boot_level; - u8 vce_boot_level; - u8 acp_boot_level; - u8 samu_boot_level; - u8 uvd_interval; - u8 vce_interval; - u8 acp_interval; - u8 samu_interval; - u8 graphics_boot_level; - u8 graphics_interval; - u8 graphics_therm_throttle_enable; - u8 graphics_voltage_change_enable; - u8 graphics_clk_slow_enable; - u8 graphics_clk_slow_divider; - u8 fps_low_t; - u32 low_sclk_interrupt_t; - bool uvd_power_gated; - bool vce_power_gated; - bool acp_power_gated; - bool samu_power_gated; - bool nb_dpm_enabled; - /* flags */ - bool enable_didt; - bool enable_dpm; - bool enable_auto_thermal_throttling; - bool enable_nb_dpm; - /* caps */ - bool caps_cac; - bool caps_power_containment; - bool caps_sq_ramping; - bool caps_db_ramping; - bool caps_td_ramping; - bool caps_tcp_ramping; - bool caps_sclk_throttle_low_notification; - bool caps_fps; - bool caps_uvd_dpm; - bool caps_uvd_pg; - bool caps_vce_pg; - bool caps_samu_pg; - bool caps_acp_pg; - bool caps_stable_p_state; - bool caps_enable_dfs_bypass; - bool caps_sclk_ds; - struct amdgpu_ps current_rps; - struct kv_ps current_ps; - struct amdgpu_ps requested_rps; - struct kv_ps requested_ps; -}; - -/* XXX are these ok? */ -#define KV_TEMP_RANGE_MIN (90 * 1000) -#define KV_TEMP_RANGE_MAX (120 * 1000) - -/* kv_smc.c */ -int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id); -int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask); -int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter); -int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit); -int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable); -int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable); -int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit); - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c deleted file mode 100644 index 2d9ab6b8be66..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Alex Deucher - */ - -#include "amdgpu.h" -#include "cikd.h" -#include "kv_dpm.h" - -#include "smu/smu_7_0_0_d.h" -#include "smu/smu_7_0_0_sh_mask.h" - -int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id) -{ - u32 i; - u32 tmp = 0; - - WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK); - - for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0) - break; - udelay(1); - } - tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK; - - if (tmp != 1) { - if (tmp == 0xFF) - return -EINVAL; - else if (tmp == 0xFE) - return -EINVAL; - } - - return 0; -} - -int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask) -{ - int ret; - - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask); - - if (ret == 0) - *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0); - - return ret; -} - -int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter) -{ - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return amdgpu_kv_notify_message_to_smu(adev, msg); -} - -static int kv_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, - ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); - - return 0; -} - -int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - int ret; - - ret = kv_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - *value = RREG32(mmSMC_IND_DATA_0); - return 0; -} - -int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable) -{ - if (enable) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable); - else - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable); -} - -int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable) -{ - if (enable) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM); - else - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM); -} - -int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit) -{ - int ret; - u32 data, original_data, addr, extra_shift, t_byte, count, mask; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - t_byte = addr & 3; - - /* RMW for the initial bytes */ - if (t_byte != 0) { - addr -= t_byte; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - original_data = RREG32(mmSMC_IND_DATA_0); - - data = 0; - mask = 0; - count = 4; - while (count > 0) { - if (t_byte > 0) { - mask = (mask << 8) | 0xff; - t_byte--; - } else if (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - mask <<= 8; - } else { - data <<= 8; - mask = (mask << 8) | 0xff; - } - count--; - } - - data |= original_data & 
mask; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - - addr += 4; - } - - while (byte_count >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - /* RMW for the final bytes */ - if (byte_count > 0) { - data = 0; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - original_data = RREG32(mmSMC_IND_DATA_0); - - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* SMC address space is BE */ - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - - data |= (original_data & ~((~0UL) << extra_shift)); - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - } - return 0; -} - diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c new file mode 100644 index 000000000000..8f7107d392af --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -0,0 +1,128 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu_ras.h" +#include "amdgpu.h" +#include "amdgpu_mca.h" + +#define smnMCMP0_STATUST0 0x03830408 +#define smnMCMP1_STATUST0 0x03b30408 +#define smnMCMPIO_STATUST0 0x0c930408 + + +static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_mca_query_ras_error_count(adev, + smnMCMP0_STATUST0, + ras_error_status); +} + +static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev) +{ + return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0); +} + +static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_mca_ras_fini(adev, &adev->mca.mp0); +} + +const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = { + .ras_late_init = mca_v3_0_mp0_ras_late_init, + .ras_fini = mca_v3_0_mp0_ras_fini, + .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, + .query_ras_error_address = NULL, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0, + .sysfs_name = "mp0_err_count", +}; + +static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_mca_query_ras_error_count(adev, + smnMCMP1_STATUST0, + ras_error_status); +} + +static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev) +{ + return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1); +} + +static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_mca_ras_fini(adev, &adev->mca.mp1); +} + +const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = { + .ras_late_init = mca_v3_0_mp1_ras_late_init, + .ras_fini = mca_v3_0_mp1_ras_fini, + .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, + .query_ras_error_address = NULL, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1, + .sysfs_name = "mp1_err_count", +}; + +static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_mca_query_ras_error_count(adev, + smnMCMPIO_STATUST0, + ras_error_status); +} + +static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev) +{ + return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio); +} + +static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_mca_ras_fini(adev, &adev->mca.mpio); +} + +const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = { + .ras_late_init = mca_v3_0_mpio_ras_late_init, + .ras_fini = mca_v3_0_mpio_ras_fini, + .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, + .query_ras_error_address = NULL, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO, + .sysfs_name = "mpio_err_count", +}; + + +static void mca_v3_0_init(struct amdgpu_device *adev) +{ + struct amdgpu_mca *mca = &adev->mca; + + mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs; + mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs; + mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs; +} + +const struct amdgpu_mca_funcs mca_v3_0_funcs = { + .init = mca_v3_0_init, +};
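The three MCA sub-blocks wired up above (MP0, MP1, MPIO) all follow one shape: a const ops table that names the SMN status register, the sysfs counter file, and the late-init/fini hooks, which mca_v3_0_init() then plugs into adev->mca. A minimal standalone model of that vtable dispatch pattern — not driver code; the struct layout and names below are illustrative, and generic_query() merely stands in for amdgpu_mca_query_ras_error_count():

```c
#include <stdio.h>

struct mca_ras_funcs {
	const char *sysfs_name;
	unsigned int status_reg;	/* SMN address of the STATUST0 register */
	void (*query_error_count)(unsigned int status_reg);
};

static void generic_query(unsigned int status_reg)
{
	/* stand-in for amdgpu_mca_query_ras_error_count() */
	printf("reading MCA status at SMN 0x%08x\n", status_reg);
}

/* one ops table per sub-block, mirroring mp0/mp1/mpio above */
static const struct mca_ras_funcs mp0  = { "mp0_err_count",  0x03830408, generic_query };
static const struct mca_ras_funcs mp1  = { "mp1_err_count",  0x03b30408, generic_query };
static const struct mca_ras_funcs mpio = { "mpio_err_count", 0x0c930408, generic_query };

int main(void)
{
	const struct mca_ras_funcs *blocks[] = { &mp0, &mp1, &mpio };

	/* generic RAS code only sees the table, never the sub-block type */
	for (unsigned int i = 0; i < 3; i++)
		blocks[i]->query_error_count(blocks[i]->status_reg);
	return 0;
}
```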
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h new file mode 100644 index 000000000000..b899b86194c2 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __MCA_V3_0_H__ +#define __MCA_V3_0_H__ + +extern const struct amdgpu_mca_funcs mca_v3_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mes_api_def.h b/drivers/gpu/drm/amd/amdgpu/mes_api_def.h new file mode 100644 index 000000000000..3f4fca5fd1da --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mes_api_def.h @@ -0,0 +1,443 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MES_API_DEF_H__ +#define __MES_API_DEF_H__ + +#pragma pack(push, 4) + +#define MES_API_VERSION 1 + +/* The driver submits one API (cmd) as a single frame, and the command size is + * the same for all APIs to ease debugging and parsing of the ring buffer. + */ +enum { API_FRAME_SIZE_IN_DWORDS = 64 }; + +/* To avoid a command in the scheduler context being overwritten whenever + * multiple interrupts come in, this creates another queue. 
+ */ +enum { API_NUMBER_OF_COMMAND_MAX = 32 }; + +enum MES_API_TYPE { + MES_API_TYPE_SCHEDULER = 1, + MES_API_TYPE_MAX +}; + +enum MES_SCH_API_OPCODE { + MES_SCH_API_SET_HW_RSRC = 0, + MES_SCH_API_SET_SCHEDULING_CONFIG = 1, /* agreegated db, quantums, etc */ + MES_SCH_API_ADD_QUEUE = 2, + MES_SCH_API_REMOVE_QUEUE = 3, + MES_SCH_API_PERFORM_YIELD = 4, + MES_SCH_API_SET_GANG_PRIORITY_LEVEL = 5, + MES_SCH_API_SUSPEND = 6, + MES_SCH_API_RESUME = 7, + MES_SCH_API_RESET = 8, + MES_SCH_API_SET_LOG_BUFFER = 9, + MES_SCH_API_CHANGE_GANG_PRORITY = 10, + MES_SCH_API_QUERY_SCHEDULER_STATUS = 11, + MES_SCH_API_PROGRAM_GDS = 12, + MES_SCH_API_SET_DEBUG_VMID = 13, + MES_SCH_API_MISC = 14, + MES_SCH_API_MAX = 0xFF +}; + +union MES_API_HEADER { + struct { + uint32_t type : 4; /* 0 - Invalid; 1 - Scheduling; 2 - TBD */ + uint32_t opcode : 8; + uint32_t dwsize : 8; /* including header */ + uint32_t reserved : 12; + }; + + uint32_t u32All; +}; + +enum MES_AMD_PRIORITY_LEVEL { + AMD_PRIORITY_LEVEL_LOW = 0, + AMD_PRIORITY_LEVEL_NORMAL = 1, + AMD_PRIORITY_LEVEL_MEDIUM = 2, + AMD_PRIORITY_LEVEL_HIGH = 3, + AMD_PRIORITY_LEVEL_REALTIME = 4, + AMD_PRIORITY_NUM_LEVELS +}; + +enum MES_QUEUE_TYPE { + MES_QUEUE_TYPE_GFX, + MES_QUEUE_TYPE_COMPUTE, + MES_QUEUE_TYPE_SDMA, + MES_QUEUE_TYPE_MAX, +}; + +struct MES_API_STATUS { + uint64_t api_completion_fence_addr; + uint64_t api_completion_fence_value; +}; + +enum { MAX_COMPUTE_PIPES = 8 }; +enum { MAX_GFX_PIPES = 2 }; +enum { MAX_SDMA_PIPES = 2 }; + +enum { MAX_COMPUTE_HQD_PER_PIPE = 8 }; +enum { MAX_GFX_HQD_PER_PIPE = 8 }; +enum { MAX_SDMA_HQD_PER_PIPE = 10 }; + +enum { MAX_QUEUES_IN_A_GANG = 8 }; + +enum VM_HUB_TYPE { + VM_HUB_TYPE_GC = 0, + VM_HUB_TYPE_MM = 1, + VM_HUB_TYPE_MAX, +}; + +enum { VMID_INVALID = 0xffff }; + +enum { MAX_VMID_GCHUB = 16 }; +enum { MAX_VMID_MMHUB = 16 }; + +enum MES_LOG_OPERATION { + MES_LOG_OPERATION_CONTEXT_STATE_CHANGE = 0 +}; + +enum MES_LOG_CONTEXT_STATE { + MES_LOG_CONTEXT_STATE_IDLE = 0, + MES_LOG_CONTEXT_STATE_RUNNING = 1, + MES_LOG_CONTEXT_STATE_READY = 2, + MES_LOG_CONTEXT_STATE_READY_STANDBY = 3, +}; + +struct MES_LOG_CONTEXT_STATE_CHANGE { + void *h_context; + enum MES_LOG_CONTEXT_STATE new_context_state; +}; + +struct MES_LOG_ENTRY_HEADER { + uint32_t first_free_entry_index; + uint32_t wraparound_count; + uint64_t number_of_entries; + uint64_t reserved[2]; +}; + +struct MES_LOG_ENTRY_DATA { + uint64_t gpu_time_stamp; + uint32_t operation_type; /* operation_type is of MES_LOG_OPERATION type */ + uint32_t reserved_operation_type_bits; + union { + struct MES_LOG_CONTEXT_STATE_CHANGE context_state_change; + uint64_t reserved_operation_data[2]; + }; +}; + +struct MES_LOG_BUFFER { + struct MES_LOG_ENTRY_HEADER header; + struct MES_LOG_ENTRY_DATA entries[1]; +}; + +union MESAPI_SET_HW_RESOURCES { + struct { + union MES_API_HEADER header; + uint32_t vmid_mask_mmhub; + uint32_t vmid_mask_gfxhub; + uint32_t gds_size; + uint32_t paging_vmid; + uint32_t compute_hqd_mask[MAX_COMPUTE_PIPES]; + uint32_t gfx_hqd_mask[MAX_GFX_PIPES]; + uint32_t sdma_hqd_mask[MAX_SDMA_PIPES]; + uint32_t agreegated_doorbells[AMD_PRIORITY_NUM_LEVELS]; + uint64_t g_sch_ctx_gpu_mc_ptr; + uint64_t query_status_fence_gpu_mc_ptr; + struct MES_API_STATUS api_status; + union { + struct { + uint32_t disable_reset : 1; + uint32_t reserved : 31; + }; + uint32_t uint32_t_all; + }; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__ADD_QUEUE { + struct { + union MES_API_HEADER header; + uint32_t process_id; + uint64_t page_table_base_addr; + 
uint64_t process_va_start; + uint64_t process_va_end; + uint64_t process_quantum; + uint64_t process_context_addr; + uint64_t gang_quantum; + uint64_t gang_context_addr; + uint32_t inprocess_gang_priority; + enum MES_AMD_PRIORITY_LEVEL gang_global_priority_level; + uint32_t doorbell_offset; + uint64_t mqd_addr; + uint64_t wptr_addr; + enum MES_QUEUE_TYPE queue_type; + uint32_t gds_base; + uint32_t gds_size; + uint32_t gws_base; + uint32_t gws_size; + uint32_t oa_mask; + + struct { + uint32_t paging : 1; + uint32_t debug_vmid : 4; + uint32_t program_gds : 1; + uint32_t is_gang_suspended : 1; + uint32_t is_tmz_queue : 1; + uint32_t reserved : 24; + }; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__REMOVE_QUEUE { + struct { + union MES_API_HEADER header; + uint32_t doorbell_offset; + uint64_t gang_context_addr; + + struct { + uint32_t unmap_legacy_gfx_queue : 1; + uint32_t reserved : 31; + }; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__SET_SCHEDULING_CONFIG { + struct { + union MES_API_HEADER header; + /* Grace period when preempting another priority band for this + * priority band. The value for idle priority band is ignored, + * as it never preempts other bands. + */ + uint64_t grace_period_other_levels[AMD_PRIORITY_NUM_LEVELS]; + /* Default quantum for scheduling across processes within + * a priority band. + */ + uint64_t process_quantum_for_level[AMD_PRIORITY_NUM_LEVELS]; + /* Default grace period for processes that preempt each other + * within a priority band. + */ + uint64_t process_grace_period_same_level[AMD_PRIORITY_NUM_LEVELS]; + /* For normal level this field specifies the target GPU + * percentage in situations when it's starved by the high level. + * Valid values are between 0 and 50, with the default being 10. 
+ */ + uint32_t normal_yield_percent; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__PERFORM_YIELD { + struct { + union MES_API_HEADER header; + uint32_t dummy; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__CHANGE_GANG_PRIORITY_LEVEL { + struct { + union MES_API_HEADER header; + uint32_t inprocess_gang_priority; + enum MES_AMD_PRIORITY_LEVEL gang_global_priority_level; + uint64_t gang_quantum; + uint64_t gang_context_addr; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__SUSPEND { + struct { + union MES_API_HEADER header; + /* false - suspend all gangs; true - specific gang */ + struct { + uint32_t suspend_all_gangs : 1; + uint32_t reserved : 31; + }; + /* gang_context_addr is valid only if suspend_all = false */ + uint64_t gang_context_addr; + + uint64_t suspend_fence_addr; + uint32_t suspend_fence_value; + + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__RESUME { + struct { + union MES_API_HEADER header; + /* false - resume all gangs; true - specified gang */ + struct { + uint32_t resume_all_gangs : 1; + uint32_t reserved : 31; + }; + /* valid only if resume_all_gangs = false */ + uint64_t gang_context_addr; + + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__RESET { + struct { + union MES_API_HEADER header; + + struct { + uint32_t reset_queue : 1; + uint32_t reserved : 31; + }; + + uint64_t gang_context_addr; + uint32_t doorbell_offset; /* valid only if reset_queue = true */ + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__SET_LOGGING_BUFFER { + struct { + union MES_API_HEADER header; + /* There are separate log buffers for each queue type */ + enum MES_QUEUE_TYPE log_type; + /* Log buffer GPU Address */ + uint64_t logging_buffer_addr; + /* number of entries in the log buffer */ + uint32_t number_of_entries; + /* Entry index at which CPU interrupt needs to be signalled */ + uint32_t interrupt_entry; + + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__QUERY_MES_STATUS { + struct { + union MES_API_HEADER header; + bool mes_healthy; /* 0 - not healthy, 1 - healthy */ + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__PROGRAM_GDS { + struct { + union MES_API_HEADER header; + uint64_t process_context_addr; + uint32_t gds_base; + uint32_t gds_size; + uint32_t gws_base; + uint32_t gws_size; + uint32_t oa_mask; + struct MES_API_STATUS api_status; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +union MESAPI__SET_DEBUG_VMID { + struct { + union MES_API_HEADER header; + struct MES_API_STATUS api_status; + union { + struct { + uint32_t use_gds : 1; + uint32_t reserved : 31; + } flags; + uint32_t u32All; + }; + uint32_t reserved; + uint32_t debug_vmid; + uint64_t process_context_addr; + uint64_t page_table_base_addr; + uint64_t process_va_start; + uint64_t process_va_end; + uint32_t gds_base; + uint32_t gds_size; + uint32_t gws_base; + uint32_t gws_size; + uint32_t oa_mask; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +enum MESAPI_MISC_OPCODE { + MESAPI_MISC__MODIFY_REG, + MESAPI_MISC__MAX, +}; + 
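Every command in this ABI occupies one 64-dword frame, and the first dword is the MES_API_HEADER bit-field defined earlier in this file (type : 4, opcode : 8, dwsize : 8). A small standalone sketch of how the scheduler type, opcode and frame size pack into that dword — assuming the usual little-endian bit-field layout, since bit-field ordering is formally implementation-defined:

```c
#include <stdint.h>
#include <stdio.h>

#define API_FRAME_SIZE_IN_DWORDS 64

union mes_api_header {
	struct {
		uint32_t type     : 4;	/* MES_API_TYPE_SCHEDULER = 1 */
		uint32_t opcode   : 8;	/* MES_SCH_API_ADD_QUEUE = 2 */
		uint32_t dwsize   : 8;	/* whole frame, header included */
		uint32_t reserved : 12;
	};
	uint32_t u32All;
};

int main(void)
{
	union mes_api_header h = { .type = 1, .opcode = 2,
				   .dwsize = API_FRAME_SIZE_IN_DWORDS };

	/* 0x00040021 on a typical little-endian bit-field layout:
	 * type in bits 0-3, opcode in 4-11, dwsize (64 = 0x40) in 12-19 */
	printf("ADD_QUEUE header dword: 0x%08x\n", h.u32All);
	return 0;
}
```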
+enum MODIFY_REG_SUBCODE { + MODIFY_REG__OVERWRITE, + MODIFY_REG__RMW_OR, + MODIFY_REG__RMW_AND, + MODIFY_REG__MAX, +}; + +enum { MISC_DATA_MAX_SIZE_IN_DWORDS = 20 }; + +union MESAPI__MISC { + struct { + union MES_API_HEADER header; + enum MESAPI_MISC_OPCODE opcode; + struct MES_API_STATUS api_status; + + union { + struct { + enum MODIFY_REG_SUBCODE subcode; + uint32_t reg_offset; + uint32_t reg_value; + } modify_reg; + uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS]; + }; + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + +#pragma pack(pop) +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 29fab7984855..a7ec4ac89da5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -28,19 +28,165 @@ #include "nv.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" +#include "v10_structs.h" +#include "mes_api_def.h" + +#define mmCP_MES_IC_OP_CNTL_Sienna_Cichlid 0x2820 +#define mmCP_MES_IC_OP_CNTL_Sienna_Cichlid_BASE_IDX 1 MODULE_FIRMWARE("amdgpu/navi10_mes.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_mes.bin"); + +static int mes_v10_1_hw_fini(void *handle); + +#define MES_EOP_SIZE 2048 + +static void mes_v10_1_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], + ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); + } else { + BUG(); + } +} + +static u64 mes_v10_1_ring_get_rptr(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->rptr_offs]; +} + +static u64 mes_v10_1_ring_get_wptr(struct amdgpu_ring *ring) +{ + u64 wptr; + + if (ring->use_doorbell) + wptr = atomic64_read((atomic64_t *) + &ring->adev->wb.wb[ring->wptr_offs]); + else + BUG(); + return wptr; +} + +static const struct amdgpu_ring_funcs mes_v10_1_ring_funcs = { + .type = AMDGPU_RING_TYPE_MES, + .align_mask = 1, + .nop = 0, + .support_64bit_ptrs = true, + .get_rptr = mes_v10_1_ring_get_rptr, + .get_wptr = mes_v10_1_ring_get_wptr, + .set_wptr = mes_v10_1_ring_set_wptr, + .insert_nop = amdgpu_ring_insert_nop, +}; + +static int mes_v10_1_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, + void *pkt, int size) +{ + int ndw = size / 4; + signed long r; + union MESAPI__ADD_QUEUE *x_pkt = pkt; + struct amdgpu_device *adev = mes->adev; + struct amdgpu_ring *ring = &mes->ring; + + BUG_ON(size % 4 != 0); + + if (amdgpu_ring_alloc(ring, ndw)) + return -ENOMEM; + + amdgpu_ring_write_multiple(ring, pkt, ndw); + amdgpu_ring_commit(ring); + + DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode); + + r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, + adev->usec_timeout); + if (r < 1) { + DRM_ERROR("MES failed to response msg=%d\n", + x_pkt->header.opcode); + return -ETIMEDOUT; + } + + return 0; +} + +static int convert_to_mes_queue_type(int queue_type) +{ + if (queue_type == AMDGPU_RING_TYPE_GFX) + return MES_QUEUE_TYPE_GFX; + else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) + return MES_QUEUE_TYPE_COMPUTE; + else if (queue_type == AMDGPU_RING_TYPE_SDMA) + return MES_QUEUE_TYPE_SDMA; + else + BUG(); + return -1; +} static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes, struct mes_add_queue_input *input) { - return 0; + struct amdgpu_device *adev = mes->adev; + union MESAPI__ADD_QUEUE mes_add_queue_pkt; + + memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); + + mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_add_queue_pkt.header.opcode 
= MES_SCH_API_ADD_QUEUE; + mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_add_queue_pkt.process_id = input->process_id; + mes_add_queue_pkt.page_table_base_addr = + input->page_table_base_addr - adev->gmc.vram_start; + mes_add_queue_pkt.process_va_start = input->process_va_start; + mes_add_queue_pkt.process_va_end = input->process_va_end; + mes_add_queue_pkt.process_quantum = input->process_quantum; + mes_add_queue_pkt.process_context_addr = input->process_context_addr; + mes_add_queue_pkt.gang_quantum = input->gang_quantum; + mes_add_queue_pkt.gang_context_addr = input->gang_context_addr; + mes_add_queue_pkt.inprocess_gang_priority = + input->inprocess_gang_priority; + mes_add_queue_pkt.gang_global_priority_level = + input->gang_global_priority_level; + mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; + mes_add_queue_pkt.mqd_addr = input->mqd_addr; + mes_add_queue_pkt.wptr_addr = input->wptr_addr; + mes_add_queue_pkt.queue_type = + convert_to_mes_queue_type(input->queue_type); + mes_add_queue_pkt.paging = input->paging; + + mes_add_queue_pkt.api_status.api_completion_fence_addr = + mes->ring.fence_drv.gpu_addr; + mes_add_queue_pkt.api_status.api_completion_fence_value = + ++mes->ring.fence_drv.sync_seq; + + return mes_v10_1_submit_pkt_and_poll_completion(mes, + &mes_add_queue_pkt, sizeof(mes_add_queue_pkt)); } static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { - return 0; + union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + + memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); + + mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE; + mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; + mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + + mes_remove_queue_pkt.api_status.api_completion_fence_addr = + mes->ring.fence_drv.gpu_addr; + mes_remove_queue_pkt.api_status.api_completion_fence_value = + ++mes->ring.fence_drv.sync_seq; + + return mes_v10_1_submit_pkt_and_poll_completion(mes, + &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt)); } static int mes_v10_1_suspend_gang(struct amdgpu_mes *mes, @@ -55,6 +201,68 @@ static int mes_v10_1_resume_gang(struct amdgpu_mes *mes, return 0; } +static int mes_v10_1_query_sched_status(struct amdgpu_mes *mes) +{ + union MESAPI__QUERY_MES_STATUS mes_status_pkt; + + memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); + + mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; + mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_status_pkt.api_status.api_completion_fence_addr = + mes->ring.fence_drv.gpu_addr; + mes_status_pkt.api_status.api_completion_fence_value = + ++mes->ring.fence_drv.sync_seq; + + return mes_v10_1_submit_pkt_and_poll_completion(mes, + &mes_status_pkt, sizeof(mes_status_pkt)); +} + +static int mes_v10_1_set_hw_resources(struct amdgpu_mes *mes) +{ + int i; + struct amdgpu_device *adev = mes->adev; + union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt; + + memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); + + mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC; + mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + + mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub; + mes_set_hw_res_pkt.vmid_mask_gfxhub = 
mes->vmid_mask_gfxhub; + mes_set_hw_res_pkt.gds_size = adev->gds.gds_size; + mes_set_hw_res_pkt.paging_vmid = 0; + mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr; + mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr = + mes->query_status_fence_gpu_addr; + + for (i = 0; i < MAX_COMPUTE_PIPES; i++) + mes_set_hw_res_pkt.compute_hqd_mask[i] = + mes->compute_hqd_mask[i]; + + for (i = 0; i < MAX_GFX_PIPES; i++) + mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i]; + + for (i = 0; i < MAX_SDMA_PIPES; i++) + mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i]; + + for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++) + mes_set_hw_res_pkt.agreegated_doorbells[i] = + mes->agreegated_doorbells[i]; + + mes_set_hw_res_pkt.api_status.api_completion_fence_addr = + mes->ring.fence_drv.gpu_addr; + mes_set_hw_res_pkt.api_status.api_completion_fence_value = + ++mes->ring.fence_drv.sync_seq; + + return mes_v10_1_submit_pkt_and_poll_completion(mes, + &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt)); +} + static const struct amdgpu_mes_funcs mes_v10_1_funcs = { .add_hw_queue = mes_v10_1_add_hw_queue, .remove_hw_queue = mes_v10_1_remove_hw_queue, @@ -68,11 +276,15 @@ static int mes_v10_1_init_microcode(struct amdgpu_device *adev) char fw_name[30]; int err; const struct mes_firmware_header_v1_0 *mes_hdr; + struct amdgpu_firmware_info *info; switch (adev->asic_type) { case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_SIENNA_CICHLID: + chip_name = "sienna_cichlid"; + break; default: BUG(); } @@ -100,6 +312,22 @@ static int mes_v10_1_init_microcode(struct amdgpu_device *adev) le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MES]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MES; + info->fw = adev->mes.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), + PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MES_DATA]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MES_DATA; + info->fw = adev->mes.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), + PAGE_SIZE); + } + return 0; } @@ -267,15 +495,43 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF); /* invalidate ICACHE */ - data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL); + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid); + break; + default: + data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL); + break; + } data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); - WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data); + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data); + break; + default: + WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data); + break; + } /* prime the ICACHE. 
*/ - data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL); + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid); + break; + default: + data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL); + break; + } data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); - WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data); + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: + WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data); + break; + default: + WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data); + break; + } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -283,15 +539,373 @@ static int mes_v10_1_load_microcode(struct amdgpu_device *adev) return 0; } +static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev) +{ + int r; + u32 *eop; + + r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->mes.eop_gpu_obj, + &adev->mes.eop_gpu_addr, + (void **)&eop); + if (r) { + dev_warn(adev->dev, "(%d) create EOP bo failed\n", r); + return r; + } + + memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size); + + amdgpu_bo_kunmap(adev->mes.eop_gpu_obj); + amdgpu_bo_unreserve(adev->mes.eop_gpu_obj); + + return 0; +} + +static int mes_v10_1_allocate_mem_slots(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs); + if (r) { + dev_err(adev->dev, + "(%d) mes sch_ctx_offs wb alloc failed\n", r); + return r; + } + adev->mes.sch_ctx_gpu_addr = + adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4); + adev->mes.sch_ctx_ptr = + (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs]; + + r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs); + if (r) { + dev_err(adev->dev, + "(%d) query_status_fence_offs wb alloc failed\n", r); + return r; + } + adev->mes.query_status_fence_gpu_addr = + adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4); + adev->mes.query_status_fence_ptr = + (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs]; + + return 0; +} + +static int mes_v10_1_mqd_init(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v10_compute_mqd *mqd = ring->mqd_ptr; + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; + uint32_t tmp; + + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000003; + + eop_base_addr = ring->eop_gpu_addr >> 8; + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(MES_EOP_SIZE / 4) - 1)); + + mqd->cp_hqd_eop_control = tmp; + + /* enable doorbell? 
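DOORBELL_EN is recorded in the MQD only when ring->use_doorbell is true; otherwise the doorbell is explicitly disabled here.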
*/ + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); + + if (ring->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } + else + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* disable the queue if it's active */ + ring->wptr = 0; + mqd->cp_hqd_dequeue_request = 0; + mqd->cp_hqd_pq_rptr = 0; + mqd->cp_hqd_pq_wptr_lo = 0; + mqd->cp_hqd_pq_wptr_hi = 0; + + /* set the pointer to the MQD */ + mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; + mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); + + /* set MQD vmid to 0 */ + tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); + mqd->cp_mqd_control = tmp; + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + hqd_gpu_addr = ring->gpu_addr >> 8; + mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; + mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, + (order_base_2(ring->ring_size / 4) - 1)); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, + ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); +#endif + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + mqd->cp_hqd_pq_control = tmp; + + /* set the wb address whether it's enabled or not */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_rptr_report_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8; + mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + + tmp = 0; + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + ring->wptr = 0; + mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR); + + /* set the vmid for the queue */ + mqd->cp_hqd_vmid = 0; + + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE); + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); + mqd->cp_hqd_persistent_state = tmp; + + /* set MIN_IB_AVAIL_SIZE */ + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); + 
mqd->cp_hqd_ib_control = tmp; + + /* activate the queue */ + mqd->cp_hqd_active = 1; + return 0; +} + +static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring) +{ + struct v10_compute_mqd *mqd = ring->mqd_ptr; + struct amdgpu_device *adev = ring->adev; + uint32_t data = 0; + + mutex_lock(&adev->srbm_mutex); + nv_grbm_select(adev, 3, 0, 0, 0); + + /* set CP_HQD_VMID.VMID = 0. */ + data = RREG32_SOC15(GC, 0, mmCP_HQD_VMID); + data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0); + WREG32_SOC15(GC, 0, mmCP_HQD_VMID, data); + + /* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */ + data = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + /* set CP_MQD_BASE_ADDR/HI with the MQD base address */ + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); + + /* set CP_MQD_CONTROL.VMID=0 */ + data = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL); + data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0); + WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, 0); + + /* set CP_HQD_PQ_BASE/HI with the ring buffer base address */ + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); + + /* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */ + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->cp_hqd_pq_rptr_report_addr_lo); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->cp_hqd_pq_rptr_report_addr_hi); + + /* set CP_HQD_PQ_CONTROL */ + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); + + /* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */ + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, + mqd->cp_hqd_pq_wptr_poll_addr_lo); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, + mqd->cp_hqd_pq_wptr_poll_addr_hi); + + /* set CP_HQD_PQ_DOORBELL_CONTROL */ + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x53 */ + WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); + + /* set CP_HQD_ACTIVE.ACTIVE=1 */ + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active); + + nv_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +#if 0 +static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + int r; + + if (!kiq->pmf || !kiq->pmf->kiq_map_queues) + return -EINVAL; + + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + return r; + } + + kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring); + + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + DRM_ERROR("kfq enable failed\n"); + kiq_ring->sched.ready = false; + } + return r; +} +#endif + +static int mes_v10_1_queue_init(struct amdgpu_device *adev) +{ + int r; + + r = mes_v10_1_mqd_init(&adev->mes.ring); + if (r) + return r; + +#if 0 + r = mes_v10_1_kiq_enable_queue(adev); + if (r) + return r; +#else + mes_v10_1_queue_init_register(&adev->mes.ring); +#endif + + return 0; +} + +static int mes_v10_1_ring_init(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + + ring = &adev->mes.ring; + + ring->funcs = &mes_v10_1_ring_funcs; + + ring->me = 3; + ring->pipe = 0; + ring->queue = 0; + + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = 
adev->doorbell_index.mes_ring << 1; + ring->eop_gpu_addr = adev->mes.eop_gpu_addr; + ring->no_scheduler = true; + sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue); + + return amdgpu_ring_init(adev, ring, 1024, NULL, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); +} + +static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev) +{ + int r, mqd_size = sizeof(struct v10_compute_mqd); + struct amdgpu_ring *ring = &adev->mes.ring; + + if (ring->mqd_obj) + return 0; + + r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &ring->mqd_gpu_addr, &ring->mqd_ptr); + if (r) { + dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); + return r; + } + + /* prepare MQD backup */ + adev->mes.mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + if (!adev->mes.mqd_backup) + dev_warn(adev->dev, + "no memory to create MQD backup for ring %s\n", + ring->name); + + return 0; +} + static int mes_v10_1_sw_init(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->mes.adev = adev; + adev->mes.funcs = &mes_v10_1_funcs; + r = mes_v10_1_init_microcode(adev); if (r) return r; + r = mes_v10_1_allocate_eop_buf(adev); + if (r) + return r; + + r = mes_v10_1_mqd_sw_init(adev); + if (r) + return r; + + r = mes_v10_1_ring_init(adev); + if (r) + return r; + + r = mes_v10_1_allocate_mem_slots(adev); + if (r) + return r; + return 0; } @@ -299,6 +913,19 @@ static int mes_v10_1_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs); + amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs); + + kfree(adev->mes.mqd_backup); + + amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj, + &adev->mes.ring.mqd_gpu_addr, + &adev->mes.ring.mqd_ptr); + + amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj, + &adev->mes.eop_gpu_addr, + NULL); + mes_v10_1_free_microcode(adev); return 0; @@ -315,14 +942,29 @@ static int mes_v10_1_hw_init(void *handle) DRM_ERROR("failed to MES fw, r=%d\n", r); return r; } - } else { - DRM_ERROR("only support direct fw loading on MES\n"); - return -EINVAL; } mes_v10_1_enable(adev, true); + r = mes_v10_1_queue_init(adev); + if (r) + goto failure; + + r = mes_v10_1_set_hw_resources(&adev->mes); + if (r) + goto failure; + + r = mes_v10_1_query_sched_status(&adev->mes); + if (r) { + DRM_ERROR("MES is busy\n"); + goto failure; + } + return 0; + +failure: + mes_v10_1_hw_fini(adev); + return r; } static int mes_v10_1_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 28105e4af507..a99953833820 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -27,18 +27,14 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" #include "mmhub/mmhub_1_0_default.h" -#include "mmhub/mmhub_9_4_0_offset.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define mmDAGB0_CNTL_MISC2_RV 0x008f #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0 -#define EA_EDC_CNT_MASK 0x3 -#define EA_EDC_CNT_SHIFT 0x2 - -u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) +static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE); u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP); @@ -55,18 +51,18 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) return base; } -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, 
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ - int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - offset * vmid, lower_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - offset * vmid, upper_32_bits(page_table_base)); + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); } static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) @@ -100,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the @@ -118,8 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) return; /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -234,6 +229,7 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -271,43 +267,48 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) /* Send no-retry XNACK on fault to suppress VM fault storm. 
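With retry disabled, the offending wave receives a no-retry XNACK and raises a single fault interrupt instead of replaying the translation indefinitely.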
*/ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !amdgpu_noretry); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + !adev->gmc.noretry); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); } } static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; unsigned i; for (i = 0; i < 18; ++i) { WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - 2 * i, 0xffffffff); + i * hub->eng_addr_distance, 0xffffffff); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - 2 * i, 0x1f); + i * hub->eng_addr_distance, 0x1f); } } -void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, +static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable) { if (amdgpu_sriov_vf(adev)) return; if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); } } -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) +static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { /* @@ -335,14 +336,16 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) +static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; u32 i; /* Disable all tables */ - for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0); + for (i = 0; i < AMDGPU_NUM_VMID; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); /* Setup TLB control */ tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL); @@ -368,7 +371,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; @@ -405,12 +408,12 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) CRASH_ON_NO_RETRY_FAULT, 1); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, CRASH_ON_RETRY_FAULT, 1); - } + } WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void mmhub_v1_0_init(struct amdgpu_device *adev) +static 
void mmhub_v1_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -433,6 +436,12 @@ void mmhub_v1_0_init(struct amdgpu_device *adev) hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); + hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -514,7 +523,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data); } -int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) @@ -527,9 +536,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_RAVEN: case CHIP_RENOIR: mmhub_v1_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v1_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -538,7 +547,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; @@ -564,62 +573,223 @@ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) *flags |= AMD_CG_SUPPORT_MC_LS; } +static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = { + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { 
"MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + } +}; + +static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0}, +}; + +static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) { + if (mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v1_0_ras_fields[i].sec_count_mask) >> + mmhub_v1_0_ras_fields[i].sec_count_shift; + if (sec_cnt) { + dev_info(adev->dev, + "MMHUB SubBlock %s, SEC %d\n", + mmhub_v1_0_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v1_0_ras_fields[i].ded_count_mask) >> + mmhub_v1_0_ras_fields[i].ded_count_shift; + if (ded_cnt) { + dev_info(adev->dev, + "MMHUB SubBlock %s, DED %d\n", + mmhub_v1_0_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - int i; - uint32_t ea0_edc_cnt, ea0_edc_cnt2; - uint32_t ea1_edc_cnt, ea1_edc_cnt2; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - /* EDC CNT will be cleared automatically after read */ - ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20); - ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20); - ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20); - ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20); - - /* error count of each error type is recorded by 2 bits, - * ce and ue count in EDC_CNT - */ - for (i = 0; i < 5; i++) { - err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - } - /* successive ue count in EDC_CNT */ - for (i = 0; i < 5; i++) { - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + if (reg_value) + mmhub_v1_0_get_ras_error_count(adev, + &mmhub_v1_0_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); } - /* ce and ue count in EDC_CNT2 */ - for (i = 0; i < 3; i++) { - err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - 
err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - } - /* successive ue count in EDC_CNT2 */ - for (i = 0; i < 6; i++) { - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; +} + +static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); } } -const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { +const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, + .ras_fini = amdgpu_mmhub_ras_fini, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, +}; + +const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { + .get_fb_location = mmhub_v1_0_get_fb_location, + .init = mmhub_v1_0_init, + .gart_enable = mmhub_v1_0_gart_enable, + .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default, + .gart_disable = mmhub_v1_0_gart_disable, + .set_clockgating = mmhub_v1_0_set_clockgating, + .get_clockgating = mmhub_v1_0_get_clockgating, + .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs, + .update_power_gating = mmhub_v1_0_update_power_gating, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index c43319e8f945..4661b094e007 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -24,19 +24,6 @@ #define __MMHUB_V1_0_H__ extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs; - -u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev); -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev); -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev); -void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void mmhub_v1_0_init(struct amdgpu_device *adev); -int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); -void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, - bool enable); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); +extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c new file mode 100644 index 000000000000..f80a14a1b82d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -0,0 +1,1343 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_ras.h" +#include "mmhub_v1_7.h" + +#include "mmhub/mmhub_1_7_offset.h" +#include "mmhub/mmhub_1_7_sh_mask.h" +#include "vega10_enum.h" + +#include "soc15_common.h" +#include "soc15.h" + +#define regVM_L2_CNTL3_DEFAULT 0x80100007 +#define regVM_L2_CNTL4_DEFAULT 0x000000c1 + +static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev) +{ + u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE); + u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP); + + base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK; + top <<= 24; + + adev->gmc.fb_start = base; + adev->gmc.fb_end = top; + + return base; +} + +static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); +} + +static void mmhub_v1_7_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base; + + if (adev->gmc.pdb0_bo) + pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); + else + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v1_7_setup_vm_pt_regs(adev, 0, pt_base); + + /* If use GART for FB translation, vmid0 page table covers both + * vram and system memory (gart) + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.fb_start >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.fb_start >> 44)); + + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + + } else { + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); + } +} + 
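+/* + * Program the AGP BAR and the vmid0 system aperture. When VRAM is squeezed + * into the GART aperture (adev->gmc.pdb0_bo set), the FB and AGP apertures + * are disabled below, since VRAM is then reached through the GART mapping + * set up above. + */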
+static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + uint32_t tmp; + + /* Program the AGP BAR */ + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + if (amdgpu_sriov_vf(adev)) + return; + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* In the case squeezing vram into GART aperture, we don't use + * FB aperture and AGP aperture. Disable them. + */ + if (adev->gmc.pdb0_bo) { + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); + } + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". */ + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); +} + +static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. 
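MTYPE_UC forces uncached system accesses, the conservative setting carried over from emulation.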
*/ + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); + + WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp); + + tmp = regVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp); + + tmp = regVM_L2_CNTL4_DEFAULT; + if (adev->gmc.xgmi.connected_to_cpu) { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 1); + } else { + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + } + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp); +} + +static void mmhub_v1_7_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, + adev->gmc.vmid0_page_table_depth); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, + adev->gmc.vmid0_page_table_block_size); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp); +} + +static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return; + + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0XFFFFFFFF); + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(MMHUB, 0, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); + WREG32_SOC15(MMHUB, 0, + regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); + + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, + 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, + 0); +} + +static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + unsigned num_level, block_size; + uint32_t tmp; + int i; + + num_level = adev->vm_manager.num_level; + block_size = adev->vm_manager.block_size; + if (adev->gmc.translate_further) + num_level -= 1; + else + block_size -= 9; + + for (i = 0; i <= 14; i++) { + tmp = 
RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + num_level); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + block_size); + /* On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. + */ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + 1); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + unsigned i; + + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) +{ + /* GART Enable. 
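Program the apertures, TLB and L2 cache first, then enable the system domain, the per-VMID contexts and the invalidation engines.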
*/ + mmhub_v1_7_init_gart_aperture_regs(adev); + mmhub_v1_7_init_system_aperture_regs(adev); + mmhub_v1_7_init_tlb_regs(adev); + mmhub_v1_7_init_cache_regs(adev); + + mmhub_v1_7_enable_system_domain(adev); + mmhub_v1_7_disable_identity_aperture(adev); + mmhub_v1_7_setup_vmid_config(adev); + mmhub_v1_7_program_invalidation(adev); + + return 0; +} + +static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, + MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, + 0); + WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); + + if (!amdgpu_sriov_vf(adev)) { + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp); + WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0); + } +} + +/** + * mmhub_v1_7_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool value) +{ + u32 tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, + VM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + + WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static void mmhub_v1_7_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(MMHUB, 0, + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + 
SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + +} + +static void mmhub_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data, def1, data1, def2 = 0, data2 = 0; + + def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG); + + def1 = data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2); + def2 = data2 = RREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2); + + if (enable) { + data |= ATC_L2_MISC_CG__ENABLE_MASK; + + data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + + data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + } else { + data &= ~ATC_L2_MISC_CG__ENABLE_MASK; + + data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + + data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + } + + if (def != data) + WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data); + + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2, data1); + + if (def2 != data2) + WREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2, data2); +} + +static void mmhub_v1_7_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG); + + if (enable) + data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + else + data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + + if (def != data) + WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data); +} + +static int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + if (amdgpu_sriov_vf(adev)) + return 0; + + /* Change state only if MCCG support is enabled through driver */ + if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) + mmhub_v1_7_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + + /* Change state only if LS support is enabled through driver */ + if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) + mmhub_v1_7_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + + return 0; +} + +static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags) +{ + int data, data1; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG); + + data1 = 
RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2); + + /* AMD_CG_SUPPORT_MC_MGCG */ + if ((data & ATC_L2_MISC_CG__ENABLE_MASK) && + !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))) + *flags |= AMD_CG_SUPPORT_MC_MGCG; + + /* AMD_CG_SUPPORT_MC_LS */ + if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_MC_LS; +} + +static const struct soc15_ras_field_entry mmhub_v1_7_ras_fields[] = { + /* MMHUB Range 0 */ + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, 
MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + + /* MMHUB Range 1 */ + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + 
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + + /* MMHUB Range 2 */ + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0,
regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + + /* MMHUB Range 3 */ + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_WRET_TAGMEM",
SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + + /* MMHUB Range 4 */ + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, 
DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { 
"MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + + /* MMHUAB Range 5 */ + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, 
MAM_D1MEM_DED_COUNT), + }, + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, +}; + +static const struct soc15_reg_entry mmhub_v1_7_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), 0, 0, 0 }, +}; + +static int mmhub_v1_7_get_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t value, + uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ras_fields); i++) { + if (mmhub_v1_7_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v1_7_ras_fields[i].sec_count_mask) >> + mmhub_v1_7_ras_fields[i].sec_count_shift; + if (sec_cnt) { + dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n", + mmhub_v1_7_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v1_7_ras_fields[i].ded_count_mask) >> + mmhub_v1_7_ras_fields[i].ded_count_shift; + if (ded_cnt) { + dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n", + mmhub_v1_7_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static void mmhub_v1_7_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; +
uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i])); + if (reg_value) + mmhub_v1_7_get_ras_error_count(adev, &mmhub_v1_7_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; +} + +static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* write 0 to reset the edc counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++) + WREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i]), 0); + } +} + +static const struct soc15_reg_entry mmhub_v1_7_ea_err_status_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_ERR_STATUS), 0, 0, 0 }, +}; + +static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev) +{ + int i; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) + return; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i])); + if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { + dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", + i, reg_value); + } + } +} + +static void mmhub_v1_7_reset_ras_error_status(struct amdgpu_device *adev) +{ + int i; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) + return; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) { + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + mmhub_v1_7_ea_err_status_regs[i])); + reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x01); + WREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i]), + reg_value); + } +} + +const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, + .ras_fini = amdgpu_mmhub_ras_fini, + .query_ras_error_count = mmhub_v1_7_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count, + .query_ras_error_status = mmhub_v1_7_query_ras_error_status, + .reset_ras_error_status = mmhub_v1_7_reset_ras_error_status, +}; + +const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = { + .get_fb_location = mmhub_v1_7_get_fb_location, + .init = mmhub_v1_7_init, + .gart_enable = mmhub_v1_7_gart_enable, + .set_fault_enable_default = mmhub_v1_7_set_fault_enable_default, + .gart_disable = mmhub_v1_7_gart_disable, + .set_clockgating = mmhub_v1_7_set_clockgating, + .get_clockgating = mmhub_v1_7_get_clockgating, + .setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h new file mode 100644 index 000000000000..a7f9dfc24697 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h @@ -0,0 +1,29 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
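
For reference, the RAS path above is a table-driven mask-and-shift decode: each soc15_ras_field_entry pairs an EDC counter register with the mask/shift of its SEC (single-error-corrected, accumulated as correctable) and DED (double-error-detected, accumulated as uncorrectable) fields, and query_ras_error_count folds the extracted values into ce_count/ue_count. Below is a minimal standalone sketch of that decode pattern; the 4-bit field positions are hypothetical, not the real MMEA register layout.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical layout: SEC count in bits [3:0], DED count in bits [7:4]. */
	#define SEC_COUNT_MASK	0x0000000fu
	#define SEC_COUNT_SHIFT	0
	#define DED_COUNT_MASK	0x000000f0u
	#define DED_COUNT_SHIFT	4

	static void decode_edc(uint32_t value, uint32_t *sec, uint32_t *ded)
	{
		/* SEC feeds the correctable total, DED the uncorrectable total. */
		*sec += (value & SEC_COUNT_MASK) >> SEC_COUNT_SHIFT;
		*ded += (value & DED_COUNT_MASK) >> DED_COUNT_SHIFT;
	}

	int main(void)
	{
		uint32_t sec = 0, ded = 0;

		decode_edc(0x32, &sec, &ded);	/* in this fake encoding: SEC 2, DED 3 */
		printf("SEC %u, DED %u\n", sec, ded);
		return 0;
	}

The driver walks one such entry per sub-block and only decodes entries whose reg_offset matches the register it just read, which is why several names can share one counter register.
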
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __MMHUB_V1_7_H__ +#define __MMHUB_V1_7_H__ + +extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs; +extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index a7cb185d639a..25f8e93e5ec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -29,20 +29,175 @@ #include "mmhub/mmhub_2_0_0_default.h" #include "navi10_enum.h" +#include "gc/gc_10_1_0_offset.h" #include "soc15_common.h" -void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, +#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d +#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX 0 +#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070 +#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0 + +static const char *mmhub_client_ids_navi1x[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [13][0] = "VMC", + [14][0] = "HDP", + [15][0] = "OSS", + [16][0] = "VCNU", + [17][0] = "JPEG", + [18][0] = "VCN", + [3][1] = "DCEDMC", + [4][1] = "DCEXFC", + [5][1] = "DCEVGA", + [6][1] = "DCEDWB", + [7][1] = "MP0", + [8][1] = "MP1", + [9][1] = "DBGU1", + [10][1] = "DBGU0", + [11][1] = "XDP", + [14][1] = "HDP", + [15][1] = "OSS", + [16][1] = "VCNU", + [17][1] = "JPEG", + [18][1] = "VCN", +}; + +static const char *mmhub_client_ids_sienna_cichlid[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [8][0] = "VMC", + [9][0] = "VCNU0", + [10][0] = "JPEG", + [12][0] = "VCNU1", + [13][0] = "VCN1", + [14][0] = "HDP", + [15][0] = "OSS", + [32+11][0] = "VCN0", + [0][1] = "DBGU0", + [1][1] = "DBGU1", + [2][1] = "DCEDWB", + [3][1] = "DCEDMC", + [4][1] = "DCEVGA", + [5][1] = "MP0", + [6][1] = "MP1", + [7][1] = "XDP", + [9][1] = "VCNU0", + [10][1] = "JPEG", + [11][1] = "VCN0", + [12][1] = "VCNU1", + [13][1] = "VCN1", + [14][1] = "HDP", + [15][1] = "OSS", +}; + +static const char *mmhub_client_ids_beige_goby[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [8][0] = "VMC", + [9][0] = "VCNU0", + [11][0] = "VCN0", + [14][0] = "HDP", + [15][0] = "OSS", + [0][1] = "DBGU0", + [1][1] = "DBGU1", + [2][1] = "DCEDWB", + [3][1] = "DCEDMC", + [4][1] = "DCEVGA", + [5][1] = "MP0", + [6][1] = "MP1", + [7][1] = "XDP", + [9][1] = "VCNU0", + [11][1] = "VCN0", + [14][1] = "HDP", + [15][1] = "OSS", 
+}; + +static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + uint32_t cid, rw; + const char *mmhub_cid = NULL; + + cid = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, RW); + + dev_err(adev->dev, + "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; + break; + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; + break; + case IP_VERSION(2, 1, 2): + mmhub_cid = mmhub_client_ids_beige_goby[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; + } + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? mmhub_cid : "unknown", cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%x\n", rw); +} + +static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - /* two registers distance between mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */ - int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - offset * vmid, lower_32_bits(page_table_base)); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, + lower_32_bits(page_table_base)); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - offset * vmid, upper_32_bits(page_table_base)); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, + upper_32_bits(page_table_base)); } static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) @@ -51,14 +206,14 @@ static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, 
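
The client-ID decode in the fault handler above is a sparse two-column lookup: the row is the CID field from the fault status, the column is the RW bit, and IDs with no entry stay NULL and print as "unknown". A compact standalone illustration of the same designated-initializer pattern follows; the entries are a made-up subset, not the full Navi table.

	#include <stdio.h>

	/* Sparse [client_id][rw] table via designated initializers; gaps stay NULL. */
	static const char *client_ids[][2] = {
		[3][0] = "DCEDMC",
		[14][0] = "HDP",
		[14][1] = "HDP",
		[18][0] = "VCN",
	};

	static const char *client_name(unsigned int cid, unsigned int rw)
	{
		if (cid >= sizeof(client_ids) / sizeof(client_ids[0]) || rw > 1)
			return "unknown";
		return client_ids[cid][rw] ? client_ids[cid][rw] : "unknown";
	}

	int main(void)
	{
		printf("%s %s\n", client_name(14, 1), client_name(7, 0));	/* HDP unknown */
		return 0;
	}
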
(u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, (u32)(adev->gmc.gart_start >> 44)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, (u32)(adev->gmc.gart_end >> 44)); } @@ -67,20 +222,21 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - /* Disable AGP. */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + } /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, (u32)(value >> 12)); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, @@ -122,6 +278,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -157,6 +319,10 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp); + + tmp = mmMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); } static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) @@ -168,11 +334,17 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); } static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) { + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead.
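
The REG_SET_FIELD calls that dominate the cache setup above are read-modify-write updates of named bit fields: clear the field's mask, then OR in the shifted value. A self-contained approximation of that mechanism is sketched below; the field names and bit positions are illustrative, not the real MMVM_L2_CNTL layout, and the real kernel macro derives the mask and shift from the register/field name pair.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative fields: ENABLE_L2_CACHE at bit 0, CACHE_MODE at bits [2:1]. */
	#define ENABLE_L2_CACHE_SHIFT	0
	#define ENABLE_L2_CACHE_MASK	(0x1u << ENABLE_L2_CACHE_SHIFT)
	#define CACHE_MODE_SHIFT	1
	#define CACHE_MODE_MASK		(0x3u << CACHE_MODE_SHIFT)

	/* Same shape as the kernel's REG_SET_FIELD(orig, REG, FIELD, val). */
	#define SET_FIELD(orig, field, val) \
		(((orig) & ~field##_MASK) | \
		 (((uint32_t)(val) << field##_SHIFT) & field##_MASK))

	int main(void)
	{
		uint32_t tmp = 0;

		tmp = SET_FIELD(tmp, ENABLE_L2_CACHE, 1);
		tmp = SET_FIELD(tmp, CACHE_MODE, 2);
		printf("reg = 0x%08x\n", tmp);	/* 0x00000005 */
		return 0;
	}
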
+ */ + if (amdgpu_sriov_vf(adev)) + return; + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); @@ -193,6 +365,7 @@ static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; int i; uint32_t tmp; @@ -222,43 +395,37 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) /* Send no-retry XNACK on fault to suppress VM fault storm. */ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !amdgpu_noretry); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i, tmp); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2, - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2, - upper_32_bits(adev->vm_manager.max_pfn - 1)); + !adev->gmc.noretry); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); } } static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; unsigned i; for (i = 0; i < 18; ++i) { - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - 2 * i, 0xffffffff); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - 2 * i, 0x1f); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); } } -int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) +static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
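
The i * hub->ctx_distance and i * hub->ctx_addr_distance offsets introduced above replace the old hard-coded strides: the register for VMID n is the context-0 register plus n times the spacing between consecutive per-context registers, which lets one loop serve hubs with different layouts. A small sketch of that addressing scheme, with made-up register indices:

	#include <stdio.h>
	#include <stdint.h>

	/* Made-up DWORD register indices; only their difference matters. */
	#define mmCTX0_CNTL 0x100
	#define mmCTX1_CNTL 0x102	/* two registers per context in this example */

	static uint32_t ctx_reg(unsigned int vmid)
	{
		const uint32_t ctx_distance = mmCTX1_CNTL - mmCTX0_CNTL;

		/* CONTEXTn_CNTL sits at CONTEXT0_CNTL + n * stride. */
		return mmCTX0_CNTL + vmid * ctx_distance;
	}

	int main(void)
	{
		unsigned int vmid;

		for (vmid = 0; vmid < 4; vmid++)
			printf("VMID %u -> reg 0x%03x\n", vmid, ctx_reg(vmid));
		return 0;
	}
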
*/ mmhub_v2_0_init_gart_aperture_regs(adev); mmhub_v2_0_init_system_aperture_regs(adev); @@ -273,14 +440,16 @@ int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) +static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; u32 i; /* Disable all tables */ - for (i = 0; i < 16; i++) - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, i, 0); + for (i = 0; i < AMDGPU_NUM_VMID; i++) + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); /* Setup TLB control */ tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL); @@ -302,9 +471,16 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -338,7 +514,12 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void mmhub_v2_0_init(struct amdgpu_device *adev) +static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { + .print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status, + .get_invalidate_req = mmhub_v2_0_get_invalidate_req, +}; + +static void mmhub_v2_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -361,6 +542,23 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); + hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ - + mmMMVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs; } static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -368,11 +566,23 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad { uint32_t def, data, def1, data1; - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) + return; - def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + def = data = 
RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); + break; + default: + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + break; + } - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { + if (enable) { data |= MM_ATC_L2_MISC_CG__ENABLE_MASK; data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | @@ -393,11 +603,22 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } - if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); - - if (def1 != data1) - WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + if (def != data) + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1); + break; + default: + if (def != data) + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); + break; + } } static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev, @@ -405,31 +626,55 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade { uint32_t def, data; - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) + return; - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); + break; + default: + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + break; + } + + if (enable) data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; else data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; - if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + if (def != data) { + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); + break; + default: + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + break; + } + } } -int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): mmhub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -438,16 +683,25 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; if (amdgpu_sriov_vf(adev)) *flags = 0; - data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); - - data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): + data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); + data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); + break; + default: + data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + break; + } /* AMD_CG_SUPPORT_MC_MGCG */ if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) && @@ -463,3 +717,13 @@ void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) *flags |= AMD_CG_SUPPORT_MC_LS; } + +const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = { + .init = mmhub_v2_0_init, + .gart_enable = mmhub_v2_0_gart_enable, + .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default, + .gart_disable = mmhub_v2_0_gart_disable, + .set_clockgating = mmhub_v2_0_set_clockgating, + .get_clockgating = mmhub_v2_0_get_clockgating, + .setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h index 3ea4344f0315..f80f461d67da 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h @@ -23,15 +23,6 @@ #ifndef __MMHUB_V2_0_H__ #define __MMHUB_V2_0_H__ -int mmhub_v2_0_gart_enable(struct amdgpu_device *adev); -void mmhub_v2_0_gart_disable(struct amdgpu_device *adev); -void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void mmhub_v2_0_init(struct amdgpu_device *adev); -int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); -void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); +extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c new file mode 100644 index 000000000000..a11d60ec6321 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -0,0 +1,626 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
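
The switch statements above key on packed IP-version triples rather than chip enumerations: major, minor, and revision are packed into one integer, so a single compare covers a whole hardware generation. The sketch below assumes the 16/8-bit packing used by the kernel's IP_VERSION macro; the 2.1.2 association with Beige Goby is taken from the case labels above.

	#include <stdio.h>

	/* Same packing shape as the kernel's IP_VERSION(major, minor, rev). */
	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

	int main(void)
	{
		unsigned int v = IP_VERSION(2, 1, 2);	/* MMHUB 2.1.2 */

		printf("packed 0x%06x: major %u minor %u rev %u\n",
		       v, v >> 16, (v >> 8) & 0xff, v & 0xff);
		return 0;
	}
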
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "mmhub_v2_3.h" + +#include "mmhub/mmhub_2_3_0_offset.h" +#include "mmhub/mmhub_2_3_0_sh_mask.h" +#include "mmhub/mmhub_2_3_0_default.h" +#include "navi10_enum.h" + +#include "soc15_common.h" + +static const char *mmhub_client_ids_vangogh[][2] = { + [0][0] = "MP0", + [1][0] = "MP1", + [2][0] = "DCEDMC", + [3][0] = "DCEVGA", + [13][0] = "UTCL2", + [26][0] = "OSS", + [27][0] = "HDP", + [28][0] = "VCN", + [29][0] = "VCNU", + [30][0] = "JPEG", + [0][1] = "MP0", + [1][1] = "MP1", + [2][1] = "DCEDMC", + [3][1] = "DCEVGA", + [4][1] = "DCEDWB", + [5][1] = "XDP", + [26][1] = "OSS", + [27][1] = "HDP", + [28][1] = "VCN", + [29][1] = "VCNU", + [30][1] = "JPEG", +}; + +static uint32_t mmhub_v2_3_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + uint32_t cid, rw; + const char *mmhub_cid = NULL; + + cid = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, RW); + + dev_err(adev->dev, + "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 4, 0): + mmhub_cid = mmhub_client_ids_vangogh[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; + } + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? 
mmhub_cid : "unknown", cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%x\n", rw); +} + +static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev, + uint32_t vmid, + uint64_t page_table_base) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); + + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); +} + +static void mmhub_v2_3_init_gart_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v2_3_setup_vm_pt_regs(adev, 0, pt_base); + + WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + (u32)(adev->gmc.gart_end >> 44)); +} + +static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev) +{ + uint64_t value; + uint32_t tmp; + + /* Disable AGP. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + + /* Program "protection fault". 
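
The GART and page-table programming above splits each 64-bit byte address into a pair of 32-bit registers holding a 4 KiB page number: the LO32 write keeps bits [43:12] after the shift-and-truncate, and the HI32 write keeps the bits at [63:44]. A tiny sketch of that split; the sample address is hypothetical.

	#include <stdint.h>
	#include <stdio.h>

	/* Page-table addresses are written as 4 KiB page numbers split across two
	 * 32-bit registers: LO32 takes bits [43:12], HI32 takes bits [63:44]. */
	static void split_pt_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
	{
		*lo = (uint32_t)(addr >> 12);	/* truncation keeps bits [43:12] */
		*hi = (uint32_t)(addr >> 44);
	}

	int main(void)
	{
		uint32_t lo, hi;

		split_pt_addr(0x100000000000ull, &lo, &hi);	/* hypothetical GART start */
		printf("LO32 0x%08x HI32 0x%08x\n", lo, hi);	/* 0x00000000 0x00000001 */
		return 0;
	}
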
*/ + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2, tmp); +} + +static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL); + + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC); /* UC, uncached */ + + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp); +} + +static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, + ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); + /* XXX for emulation, refer to closed source code. */ + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, + 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp); + + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp); + + tmp = mmMMVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp); + + tmp = mmMMVM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL4, tmp); + + tmp = mmMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); +} + +static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev) +{ + uint32_t tmp; + + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); +} + +static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev) +{ + WREG32_SOC15(MMHUB,
0, + mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + 0xFFFFFFFF); + WREG32_SOC15(MMHUB, 0, + mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + 0x0000000F); + + WREG32_SOC15(MMHUB, 0, + mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0); + WREG32_SOC15(MMHUB, 0, + mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0); + + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, + 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, + 0); +} + +static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + int i; + uint32_t tmp; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, + adev->vm_manager.num_level); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. */ + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, + i * hub->ctx_distance, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + i * hub->ctx_addr_distance, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + i * hub->ctx_addr_distance, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + i * hub->ctx_addr_distance, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + unsigned i; + + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, 0, + mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + i * hub->eng_addr_distance, 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + i * hub->eng_addr_distance, 0x1f); + } +} + +static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) { + /* + * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are + * VF copy registers that the VBIOS post doesn't program, so the + * SRIOV driver needs to program them + */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE, + adev->gmc.vram_start >> 24); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP, + adev->gmc.vram_end >> 24); + } + + /* GART Enable.
*/ + mmhub_v2_3_init_gart_aperture_regs(adev); + mmhub_v2_3_init_system_aperture_regs(adev); + mmhub_v2_3_init_tlb_regs(adev); + mmhub_v2_3_init_cache_regs(adev); + + mmhub_v2_3_enable_system_domain(adev); + mmhub_v2_3_disable_identity_aperture(adev); + mmhub_v2_3_setup_vmid_config(adev); + mmhub_v2_3_program_invalidation(adev); + + return 0; +} + +static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + u32 tmp; + u32 i; + + /* Disable all tables */ + for (i = 0; i < AMDGPU_NUM_VMID; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, + i * hub->ctx_distance, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_MX_L1_TLB_CNTL, tmp); + + /* Setup L2 cache */ + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL, tmp); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, 0); +} + +/** + * mmhub_v2_3_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + if (!value) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp); +} + +static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = { + .print_l2_protection_fault_status = mmhub_v2_3_print_l2_protection_fault_status, + .get_invalidate_req = mmhub_v2_3_get_invalidate_req, +}; + +static void mmhub_v2_3_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + hub->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(MMHUB, 0, + mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); + hub->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(MMHUB, 0, + mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + 
SOC15_REG_OFFSET(MMHUB, 0, + mmMMVM_INVALIDATE_ENG0_SEM); + hub->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ); + hub->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ACK); + hub->vm_context0_cntl = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL); + hub->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS); + hub->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); + + hub->ctx_distance = mmMMVM_CONTEXT1_CNTL - mmMMVM_CONTEXT0_CNTL; + hub->ctx_addr_distance = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub->eng_distance = mmMMVM_INVALIDATE_ENG1_REQ - + mmMMVM_INVALIDATE_ENG0_REQ; + hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &mmhub_v2_3_vmhub_funcs; +} + +static void +mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data, def1, data1; + + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; + data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + + } else { + data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; + data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); + } + + if (def != data) + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); +} + +static void +mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data, def1, data1, def2, data2; + + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + 
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } else { + data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } + + if (def != data) + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1); + if (def2 != data2) + WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2); +} + +static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + if (amdgpu_sriov_vf(adev)) + return 0; + + mmhub_v2_3_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + mmhub_v2_3_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + + return 0; +} + +static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags) +{ + int data, data1, data2, data3; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); + + /* AMD_CG_SUPPORT_MC_MGCG */ + if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) + && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) { + *flags |= AMD_CG_SUPPORT_MC_MGCG; + } + + /* AMD_CG_SUPPORT_MC_LS */ + if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK) + && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)) + && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))) + *flags |= AMD_CG_SUPPORT_MC_LS; +} + +const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = { + .init = mmhub_v2_3_init, + .gart_enable = mmhub_v2_3_gart_enable, + .set_fault_enable_default = mmhub_v2_3_set_fault_enable_default, + .gart_disable = mmhub_v2_3_gart_disable, + .set_clockgating = mmhub_v2_3_set_clockgating, + .get_clockgating = mmhub_v2_3_get_clockgating, + .setup_vm_pt_regs = mmhub_v2_3_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.h index 2fcc4b60153c..2926d21dea8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2019 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,10 +20,9 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#ifndef __MMHUB_V2_3_H__ +#define __MMHUB_V2_3_H__ -#ifndef __CIK_DPM_H__ -#define __CIK_DPM_H__ - -extern const struct amdgpu_ip_block_version kv_smu_ip_block; +extern const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 66efe2f7bd76..c4ef822bbe8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -21,6 +21,7 @@ * */ #include "amdgpu.h" +#include "amdgpu_ras.h" #include "mmhub_v9_4.h" #include "mmhub/mmhub_9_4_1_offset.h" @@ -29,13 +30,13 @@ #include "athub/athub_1_0_offset.h" #include "athub/athub_1_0_sh_mask.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define MMHUB_NUM_INSTANCES 2 #define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000 -u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) +static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) { /* The base should be same b/t 2 mmhubs on Acrturus. Read one here. */ u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE); @@ -53,23 +54,19 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) return base; } -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, +static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { - /* two registers distance between mmVML2VC0_VM_CONTEXT0_* to - * mmVML2VC0_VM_CONTEXT1_* - */ - int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - - mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + hub->ctx_addr_distance * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, lower_32_bits(value)); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + hub->ctx_addr_distance * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, upper_32_bits(value)); } @@ -79,7 +76,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, { uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base); + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -100,6 +97,16 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, (u32)(adev->gmc.gart_end >> 44)); } +static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid, + page_table_base); +} + static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, int hubid) { @@ -117,45 +124,52 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, adev->gmc.agp_start >> 24); - /* Program the system aperture low logical page number. 
*/ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* Program the system aperture low logical page number. */ + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - /* Set default page address. */ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; - WREG32_SOC15_OFFSET(MMHUB, 0, + /* Set default page address. */ + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, (u32)(value >> 12)); - WREG32_SOC15_OFFSET(MMHUB, 0, + WREG32_SOC15_OFFSET( + MMHUB, 0, mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, (u32)(value >> 44)); - /* Program "protection fault". */ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - (u32)(adev->dummy_page_addr >> 12)); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - (u32)((u64)adev->dummy_page_addr >> 44)); + /* Program "protection fault". */ + WREG32_SOC15_OFFSET( + MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15_OFFSET( + MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)((u64)adev->dummy_page_addr >> 44)); - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, - mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); - WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + tmp = RREG32_SOC15_OFFSET( + MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + tmp); + } } static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) @@ -282,6 +296,7 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev, static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; uint32_t tmp; int i; @@ -313,23 +328,28 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) adev->vm_manager.block_size - 9); /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !adev->gmc.noretry); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i, - tmp); + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->ctx_distance, tmp); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0); + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->ctx_addr_distance, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0); + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->ctx_addr_distance, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->ctx_addr_distance, lower_32_bits(adev->vm_manager.max_pfn - 1)); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); } } @@ -337,49 +357,38 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, int hubid) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; unsigned i; for (i = 0; i < 18; ++i) { WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->eng_addr_distance, 0xffffffff); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + + i * hub->eng_addr_distance, 0x1f); } } -int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) +static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) { int i; for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase - * they are VF copy registers so vbios post doesn't - * program them, for SRIOV driver need to program them - */ - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE, - i * MMHUB_INSTANCE_REGISTER_OFFSET, - adev->gmc.vram_start >> 24); - WREG32_SOC15_OFFSET(MMHUB, 0, - mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP, - i * MMHUB_INSTANCE_REGISTER_OFFSET, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
*/ mmhub_v9_4_init_gart_aperture_regs(adev, i); mmhub_v9_4_init_system_aperture_regs(adev, i); mmhub_v9_4_init_tlb_regs(adev, i); - mmhub_v9_4_init_cache_regs(adev, i); + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_init_cache_regs(adev, i); mmhub_v9_4_enable_system_domain(adev, i); - mmhub_v9_4_disable_identity_aperture(adev, i); + if (!amdgpu_sriov_vf(adev)) + mmhub_v9_4_disable_identity_aperture(adev, i); mmhub_v9_4_setup_vmid_config(adev, i); mmhub_v9_4_program_invalidation(adev, i); } @@ -387,18 +396,19 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) +static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; u32 i, j; for (j = 0; j < MMHUB_NUM_INSTANCES; j++) { /* Disable all tables */ - for (i = 0; i < 16; i++) + for (i = 0; i < AMDGPU_NUM_VMID; i++) WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL, j * MMHUB_INSTANCE_REGISTER_OFFSET + - i, 0); + i * hub->ctx_distance, 0); /* Setup TLB control */ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, @@ -426,12 +436,12 @@ void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) } /** - * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling + * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling * * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; int i; @@ -489,7 +499,7 @@ void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) } } -void mmhub_v9_4_init(struct amdgpu_device *adev) +static void mmhub_v9_4_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; @@ -528,6 +538,15 @@ void mmhub_v9_4_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) + i * MMHUB_INSTANCE_REGISTER_OFFSET; + + hub[i]->ctx_distance = mmVML2VC0_VM_CONTEXT1_CNTL - + mmVML2VC0_VM_CONTEXT0_CNTL; + hub[i]->ctx_addr_distance = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + hub[i]->eng_distance = mmVML2VC0_VM_INVALIDATE_ENG1_REQ - + mmVML2VC0_VM_INVALIDATE_ENG0_REQ; + hub[i]->eng_addr_distance = mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - + mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } } @@ -610,7 +629,7 @@ static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *ade } } -int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) @@ -619,9 +638,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_ARCTURUS: mmhub_v9_4_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v9_4_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -630,7 +649,7 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; @@ -655,3 +674,1004 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) *flags |= AMD_CG_SUPPORT_MC_LS; } + +static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + /* MMHUB Range 0 */ + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + 
SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 1 */ + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + 
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 2 */ + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT,
IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 3 */ + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + {
"MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA3_MAM_D3MEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 4 */ + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + 
SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 5 */ + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMRD_PAGEMEM",
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 6 */ + { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + 
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 7 */ + { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), +
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT), + } +}; + +static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA1_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, +}; + +static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev, + const struct soc15_reg_entry *reg, + uint32_t value, + uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) { + if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v9_4_ras_fields[i].sec_count_mask) >> + mmhub_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n", + mmhub_v9_4_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v9_4_ras_fields[i].ded_count_mask) >> + mmhub_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n", + mmhub_v9_4_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + if (reg_value) + mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; +} + +static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + } +} + +static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0,
mmMMEA5_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 }, +}; + +static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev) +{ + int i; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) + return; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i])); + if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { + /* SDP read/write error/parity error in FUE_IS_FATAL mode + * can cause system fatal error in arcturus. Harvest the error + * status before GPU reset */ + dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", + i, reg_value); + } + } +} + +const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, + .ras_fini = amdgpu_mmhub_ras_fini, + .query_ras_error_count = mmhub_v9_4_query_ras_error_count, + .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, + .query_ras_error_status = mmhub_v9_4_query_ras_error_status, +}; + +const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { + .get_fb_location = mmhub_v9_4_get_fb_location, + .init = mmhub_v9_4_init, + .gart_enable = mmhub_v9_4_gart_enable, + .set_fault_enable_default = mmhub_v9_4_set_fault_enable_default, + .gart_disable = mmhub_v9_4_gart_disable, + .set_clockgating = mmhub_v9_4_set_clockgating, + .get_clockgating = mmhub_v9_4_get_clockgating, + .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index d435cfcec1a8..90436efa92ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -23,14 +23,7 @@ #ifndef __MMHUB_V9_4_H__ #define __MMHUB_V9_4_H__ -u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev); -int mmhub_v9_4_gart_enable(struct amdgpu_device *adev); -void mmhub_v9_4_gart_disable(struct amdgpu_device *adev); -void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void mmhub_v9_4_init(struct amdgpu_device *adev); -int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags); +extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs; +extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h index 8af0bddf85e4..2cdab8062c86 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h @@ -24,9 +24,7 @@ #ifndef __MMSCH_V1_0_H__ #define __MMSCH_V1_0_H__ -#define MMSCH_VERSION_MAJOR 1 -#define MMSCH_VERSION_MINOR 0 -#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) +#define MMSCH_VERSION 0x1 enum mmsch_v1_0_command_type { MMSCH_COMMAND__DIRECT_REG_WRITE = 0, @@ -47,6 +45,18 @@ struct mmsch_v1_0_init_header { uint32_t uvd_table_size; }; +struct mmsch_vf_eng_init_header { + uint32_t init_status; + uint32_t table_offset; + uint32_t table_size; +}; + +struct mmsch_v1_1_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_vf_eng_init_header eng[2]; +}; + struct mmsch_v1_0_cmd_direct_reg_header { uint32_t reg_offset
: 28; uint32_t command_type : 4; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h new file mode 100644 index 000000000000..1b5086c7d4e6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h @@ -0,0 +1,338 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMSCH_V2_0_H__ +#define __MMSCH_V2_0_H__ + +// addressBlock: uvd0_mmsch_dec +// base address: 0x1e000 +#define mmMMSCH_UCODE_ADDR 0x0000 +#define mmMMSCH_UCODE_ADDR_BASE_IDX 0 +#define mmMMSCH_UCODE_DATA 0x0001 +#define mmMMSCH_UCODE_DATA_BASE_IDX 0 +#define mmMMSCH_SRAM_ADDR 0x0002 +#define mmMMSCH_SRAM_ADDR_BASE_IDX 0 +#define mmMMSCH_SRAM_DATA 0x0003 +#define mmMMSCH_SRAM_DATA_BASE_IDX 0 +#define mmMMSCH_VF_SRAM_OFFSET 0x0004 +#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_DB_SRAM_OFFSET 0x0005 +#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTX_SRAM_OFFSET 0x0006 +#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTL 0x0007 +#define mmMMSCH_CTL_BASE_IDX 0 +#define mmMMSCH_INTR 0x0008 +#define mmMMSCH_INTR_BASE_IDX 0 +#define mmMMSCH_INTR_ACK 0x0009 +#define mmMMSCH_INTR_ACK_BASE_IDX 0 +#define mmMMSCH_INTR_STATUS 0x000a +#define mmMMSCH_INTR_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f +#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010 +#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_SIZE 0x0011 +#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0 0x0014 +#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015 +#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1 0x0016 +#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017 +#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0 +#define mmMMSCH_CNTL 0x001c +#define mmMMSCH_CNTL_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET0 0x001d +#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0 
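[Editor's note: the MMSCH headers in this patch all share one version convention: the major revision lives in the upper 16 bits of a dword and the minor in the lower 16 (mmsch_v2_0.h and mmsch_v3_0.h both define MMSCH_VERSION as MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR, while mmsch_v1_0.h above collapses it to 0x1). A minimal standalone C sketch of that packing follows; it is not part of the patch, and the helper names are invented for illustration.]

#include <stdint.h>
#include <stdio.h>

/* Pack major/minor the way the MMSCH headers do. */
#define MMSCH_VERSION_PACK(major, minor) (((uint32_t)(major) << 16) | (minor))

static inline uint16_t mmsch_version_major(uint32_t version)
{
	return version >> 16;		/* upper 16 bits */
}

static inline uint16_t mmsch_version_minor(uint32_t version)
{
	return version & 0xffff;	/* lower 16 bits */
}

int main(void)
{
	uint32_t v2 = MMSCH_VERSION_PACK(2, 0);	/* matches mmsch_v2_0.h */

	/* prints "MMSCH 2.0 (0x00020000)" */
	printf("MMSCH %u.%u (0x%08x)\n",
	       mmsch_version_major(v2), mmsch_version_minor(v2), v2);
	return 0;
}

[This is also why a guest driver can compare the version dword it reads back from an init header against MMSCH_VERSION with a single 32-bit comparison.]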
+#define mmMMSCH_NONCACHE_SIZE0 0x001e +#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET1 0x001f +#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE1 0x0020 +#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0 +#define mmMMSCH_PDEBUG_STATUS 0x0021 +#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EPC 0x0024 +#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025 +#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0 +#define mmMMSCH_PROC_STATE1 0x0026 +#define mmMMSCH_PROC_STATE1_BASE_IDX 0 +#define mmMMSCH_LAST_MC_ADDR 0x0027 +#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028 +#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029 +#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0 +#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a +#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMMSCH_SCRATCH_0 0x002b +#define mmMMSCH_SCRATCH_0_BASE_IDX 0 +#define mmMMSCH_SCRATCH_1 0x002c +#define mmMMSCH_SCRATCH_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d +#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e +#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f +#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_0 0x0033 +#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_0 0x0034 +#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_0 0x0035 +#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038 +#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_1 0x003c +#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_1 0x003d +#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_1 0x003e +#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT 0x003f +#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0 +#define mmMMSCH_SCRATCH_2 0x0040 +#define mmMMSCH_SCRATCH_2_BASE_IDX 0 +#define mmMMSCH_SCRATCH_3 0x0041 +#define mmMMSCH_SCRATCH_3_BASE_IDX 0 +#define mmMMSCH_SCRATCH_4 0x0042 +#define mmMMSCH_SCRATCH_4_BASE_IDX 0 +#define mmMMSCH_SCRATCH_5 0x0043 +#define mmMMSCH_SCRATCH_5_BASE_IDX 0 +#define mmMMSCH_SCRATCH_6 0x0044 +#define mmMMSCH_SCRATCH_6_BASE_IDX 0 +#define mmMMSCH_SCRATCH_7 0x0045 +#define mmMMSCH_SCRATCH_7_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046 +#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047 +#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0 +#define 
mmMMSCH_VFID_FIFO_HEAD_1 0x0048 +#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049 +#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0 +#define mmMMSCH_NACK_STATUS 0x004a +#define mmMMSCH_NACK_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX0_DATA 0x004b +#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX1_DATA 0x004c +#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053 +#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056 +#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_2 0x005a +#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_2 0x005b +#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_2 0x005c +#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060 +#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061 +#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_0 0x0062 +#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_1 0x0063 +#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_2 0x0064 +#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0 + +#define MMSCH_VERSION_MAJOR 2 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +enum mmsch_v2_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v2_0_init_header { + uint32_t version; + uint32_t header_size; + uint32_t vcn_init_status; + uint32_t vcn_table_offset; + uint32_t vcn_table_size; +}; + +struct mmsch_v2_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_direct_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct 
mmsch_v2_0_cmd_direct_read_modify_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v2_0_cmd_direct_polling { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v2_0_cmd_end { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v2_0_cmd_indirect_write { + struct mmsch_v2_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t value) +{ + direct_wt->cmd_header.reg_offset = reg_offset; + direct_wt->reg_value = value; + memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write)); +} + +static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t data) +{ + direct_rd_mod_wt->cmd_header.reg_offset = reg_offset; + direct_rd_mod_wt->mask_value = mask; + direct_rd_mod_wt->write_data = data; + memcpy((void *)init_table, direct_rd_mod_wt, + sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)); +} + +static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t wait) +{ + direct_poll->cmd_header.reg_offset = reg_offset; + direct_poll->mask_value = mask; + direct_poll->wait_value = wait; + memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling)); +} + +#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \ + init_table, (reg), \ + (mask), (data)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \ + mmsch_v2_0_insert_direct_wt(&direct_wt, \ + init_table, (reg), \ + (value)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + mmsch_v2_0_insert_direct_poll(&direct_poll, \ + init_table, (reg), \ + (mask), (wait)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h new file mode 100644 index 000000000000..3e4e858a6965 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h @@ -0,0 +1,130 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MMSCH_V3_0_H__ +#define __MMSCH_V3_0_H__ + +#include "amdgpu_vcn.h" + +#define MMSCH_VERSION_MAJOR 3 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +enum mmsch_v3_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v3_0_table_info { + uint32_t init_status; + uint32_t table_offset; + uint32_t table_size; +}; + +struct mmsch_v3_0_init_header { + uint32_t version; + uint32_t total_size; + struct mmsch_v3_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES]; +}; + +struct mmsch_v3_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v3_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v3_0_cmd_direct_write { + struct mmsch_v3_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v3_0_cmd_direct_read_modify_write { + struct mmsch_v3_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v3_0_cmd_direct_polling { + struct mmsch_v3_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v3_0_cmd_end { + struct mmsch_v3_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v3_0_cmd_indirect_write { + struct mmsch_v3_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +#define MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + size = sizeof(struct mmsch_v3_0_cmd_direct_read_modify_write); \ + size_dw = size / 4; \ + direct_rd_mod_wt.cmd_header.reg_offset = reg; \ + direct_rd_mod_wt.mask_value = mask; \ + direct_rd_mod_wt.write_data = data; \ + memcpy((void *)table_loc, &direct_rd_mod_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V3_0_INSERT_DIRECT_WT(reg, value) { \ + size = sizeof(struct mmsch_v3_0_cmd_direct_write); \ + size_dw = size / 4; \ + direct_wt.cmd_header.reg_offset = reg; \ + direct_wt.reg_value = value; \ + memcpy((void *)table_loc, &direct_wt, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V3_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + size = sizeof(struct mmsch_v3_0_cmd_direct_polling); \ + size_dw = size / 4; \ + direct_poll.cmd_header.reg_offset = reg; \ + direct_poll.mask_value = mask; \ + direct_poll.wait_value = wait; \ + memcpy((void *)table_loc, &direct_poll, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#define MMSCH_V3_0_INSERT_END() { \ + size = sizeof(struct mmsch_v3_0_cmd_end); \ + size_dw = size / 4; \ + memcpy((void *)table_loc, &end, size); \ + table_loc += size_dw; \ + table_size += size_dw; \ +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index cc5bf595f9b1..23b066bcffb2 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); } -static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) -{ - int r = 0; - u32 req, val, size; - - if (!amdgim_is_hwperf(adev) || buf == NULL) - return -EBADRQC; - - switch(type) { - case PP_SCLK: - req = IDH_IRQ_GET_PP_SCLK; - break; - case PP_MCLK: - req = IDH_IRQ_GET_PP_MCLK; - break; - default: - return -EBADRQC; - } - - mutex_lock(&adev->virt.dpm_mutex); - - xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r && adev->fw_vram_usage.va != NULL) { - val = RREG32_NO_KIQ( - SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); - size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + - val), PAGE_SIZE); - - if (size < PAGE_SIZE) - strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); - else - size = 0; - - r = size; - goto out; - } - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if(r) - pr_info("%s DPM request failed", - (type == PP_SCLK)? "SCLK" : "MCLK"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - -static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) -{ - int r = 0; - u32 req = IDH_IRQ_FORCE_DPM_LEVEL; - - if (!amdgim_is_hwperf(adev)) - return -EBADRQC; - - mutex_lock(&adev->virt.dpm_mutex); - xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r) - goto out; - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if (!r) - pr_info("DPM request failed"); - else - pr_info("Mailbox is broken"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { @@ -263,7 +187,16 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, static int xgpu_ai_request_reset(struct amdgpu_device *adev) { - return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); + int ret, i = 0; + + while (i < AI_MAILBOX_POLL_MSG_REP_MAX) { + ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); + if (!ret) + break; + i++; + } + + return ret; } static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev, @@ -314,19 +247,18 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT; - int locked; /* block amdgpu_gpu_recover till msg FLR COMPLETE received, * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. - * - * we can unlock the lock_reset to allow "amdgpu_job_timedout" - * to run gpu_recover() after FLR_NOTIFICATION_CMPL received - * which means host side had finished this VF's FLR. 
*/ - locked = mutex_trylock(&adev->lock_reset); - if (locked) - adev->in_gpu_reset = 1; + if (!down_write_trylock(&adev->reset_sem)) + return; + + amdgpu_virt_fini_data_exchange(adev); + atomic_set(&adev->in_gpu_reset, 1); + + xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); do { if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -337,14 +269,13 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - if (locked) { - adev->in_gpu_reset = 0; - mutex_unlock(&adev->lock_reset); - } + atomic_set(&adev->in_gpu_reset, 0); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) - && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT) + && (!amdgpu_device_has_job_running(adev) || + adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) amdgpu_device_gpu_recover(adev, NULL); } @@ -455,6 +386,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, - .get_pp_clk = xgpu_ai_get_pp_clk, - .force_dpm_level = xgpu_ai_force_dpm_level, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 077e91a33d62..bd3b23171579 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -25,8 +25,9 @@ #define __MXGPU_AI_H__ #define AI_MAILBOX_POLL_ACK_TIMEDOUT 500 -#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000 -#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500 +#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000 +#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000 +#define AI_MAILBOX_POLL_MSG_REP_MAX 11 enum idh_request { IDH_REQ_GPU_INIT_ACCESS = 1, @@ -35,11 +36,8 @@ enum idh_request { IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, - IDH_IRQ_FORCE_DPM_LEVEL = 10, - IDH_IRQ_GET_PP_SCLK, - IDH_IRQ_GET_PP_MCLK, - IDH_LOG_VF_ERROR = 200, + IDH_READY_TO_RESET = 201, }; enum idh_event { @@ -50,7 +48,8 @@ enum idh_event { IDH_SUCCESS, IDH_FAIL, IDH_QUERY_ALIVE, - IDH_EVENT_MAX + + IDH_TEXT_MESSAGE = 255, }; extern const struct amdgpu_virt_ops xgpu_ai_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 0d8767eb7a70..a35e6d87e537 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -30,7 +30,6 @@ #include "navi10_ih.h" #include "soc15_common.h" #include "mxgpu_nv.h" -#include "mxgpu_ai.h" static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) { @@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val) */ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) { - return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); } @@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, { u32 reg; - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0); if (reg != event) return -ENOENT; @@ -99,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev) static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) { - int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT; + int r; + uint64_t timeout, now; + + now = (uint64_t)ktime_to_ms(ktime_get()); + timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT; do { r = xgpu_nv_mailbox_rcv_msg(adev, event); @@ -107,10 +108,9 @@ static int xgpu_nv_poll_msg(struct 
amdgpu_device *adev, enum idh_event event) return 0; msleep(10); - timeout -= 10; - } while (timeout > 1); + now = (uint64_t)ktime_to_ms(ktime_get()); + } while (timeout > now); - pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); return -ETIME; } @@ -118,7 +118,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, enum idh_request req, u32 data1, u32 data2, u32 data3) { - u32 reg; int r; uint8_t trn; @@ -137,19 +136,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, } } while (trn); - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0)); - reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0, - MSGBUF_DATA, req); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0), - reg); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1), - data1); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2), - data2); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3), - data3); - + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2); + WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3); xgpu_nv_mailbox_set_valid(adev, true); /* start to poll ack */ @@ -163,24 +153,53 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { - int r; + int r, retry = 1; + enum idh_event event = -1; +send_request: xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); - /* start to check msg if request is idh_req_gpu_init_access */ - if (req == IDH_REQ_GPU_INIT_ACCESS || - req == IDH_REQ_GPU_FINI_ACCESS || - req == IDH_REQ_GPU_RESET_ACCESS) { - r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + switch (req) { + case IDH_REQ_GPU_INIT_ACCESS: + case IDH_REQ_GPU_FINI_ACCESS: + case IDH_REQ_GPU_RESET_ACCESS: + event = IDH_READY_TO_ACCESS_GPU; + break; + case IDH_REQ_GPU_INIT_DATA: + event = IDH_REQ_GPU_INIT_DATA_READY; + break; + default: + break; + } + + if (event != -1) { + r = xgpu_nv_poll_msg(adev, event); if (r) { - pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); - return r; + if (retry++ < 2) + goto send_request; + + if (req != IDH_REQ_GPU_INIT_DATA) { + pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + return r; + } else /* host doesn't support REQ_GPU_INIT_DATA handshake */ + adev->virt.req_init_data_ver = 0; + } else { + if (req == IDH_REQ_GPU_INIT_DATA) { + adev->virt.req_init_data_ver = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); + + /* assume V1 in case host doesn't set version number */ + if (adev->virt.req_init_data_ver < 1) + adev->virt.req_init_data_ver = 1; + } } + /* Retrieve checksum from mailbox2 */ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { adev->virt.fw_reserve.checksum_key = - RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2)); + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2); } } @@ -189,7 +208,16 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, static int xgpu_nv_request_reset(struct amdgpu_device *adev) { - return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); + int ret, i = 0; + + while (i < NV_MAILBOX_POLL_MSG_REP_MAX) { + ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); + if (!ret) + break; + i++; + } + + return ret; }
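[Editor's note: the rework above changes xgpu_nv_poll_msg() from a decremented counter to an absolute deadline taken from ktime_get(), and wraps the reset request in a retry loop bounded by NV_MAILBOX_POLL_MSG_REP_MAX. The deadline form matters because msleep(10) may oversleep; subtracting a fixed 10 per iteration would silently stretch the effective timeout. Below is a standalone kernel-style sketch of the same pattern, not part of the patch; poll_event() and its constants are hypothetical stand-ins for xgpu_nv_mailbox_rcv_msg() and the NV_MAILBOX_* defines.]

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define POLL_MSG_TIMEOUT_MS	6000	/* mirrors NV_MAILBOX_POLL_MSG_TIMEDOUT */
#define POLL_MSG_REP_MAX	11	/* mirrors NV_MAILBOX_POLL_MSG_REP_MAX */

/* hypothetical stub; the real code reads the mailbox RCV_DW0 register */
static int poll_event(int event)
{
	return -ENOENT;
}

static int poll_msg_deadline(int event)
{
	u64 now = ktime_to_ms(ktime_get());
	u64 timeout = now + POLL_MSG_TIMEOUT_MS;

	do {
		if (!poll_event(event))
			return 0;
		msleep(10);	/* may oversleep; the absolute deadline still holds */
		now = ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

static int request_with_retries(int event)
{
	int ret = -ETIME;
	int i = 0;

	/* resend after each failed poll, bounded so a dead host cannot hang us */
	while (i < POLL_MSG_REP_MAX) {
		ret = poll_msg_deadline(event);
		if (!ret)
			break;
		i++;
	}
	return ret;
}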
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev, @@ -213,6 +241,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, return r; } +static int xgpu_nv_request_init_data(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); +} + static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -226,11 +259,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL); + + if (state == AMDGPU_IRQ_STATE_ENABLE) + tmp |= 2; + else + tmp &= ~2; - tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN, - (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp); return 0; } @@ -240,19 +276,18 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; - int locked; /* block amdgpu_gpu_recover till msg FLR COMPLETE received, * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. - * - * we can unlock the lock_reset to allow "amdgpu_job_timedout" - * to run gpu_recover() after FLR_NOTIFICATION_CMPL received - * which means host side had finished this VF's FLR. */ - locked = mutex_trylock(&adev->lock_reset); - if (locked) - adev->in_gpu_reset = 1; + if (!down_write_trylock(&adev->reset_sem)) + return; + + amdgpu_virt_fini_data_exchange(adev); + atomic_set(&adev->in_gpu_reset, 1); + + xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); do { if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -263,13 +298,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - if (locked) { - adev->in_gpu_reset = 0; - mutex_unlock(&adev->lock_reset); - } + atomic_set(&adev->in_gpu_reset, 0); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ - if (amdgpu_device_should_recover_gpu(adev)) + if (amdgpu_device_should_recover_gpu(adev) + && (!amdgpu_device_has_job_running(adev) || + adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || + adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || + adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || + adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) amdgpu_device_gpu_recover(adev, NULL); } @@ -278,11 +316,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL); + + if (state == AMDGPU_IRQ_STATE_ENABLE) + tmp |= 1; + else + tmp &= ~1; - tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN, - (state == AMDGPU_IRQ_STATE_ENABLE) ? 
1 : 0); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp); return 0; } @@ -374,6 +415,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .req_init_data = xgpu_nv_request_init_data, .reset_gpu = xgpu_nv_request_reset, .wait_reset = NULL, .trans_msg = xgpu_nv_mailbox_trans_msg, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 99b15f6865cb..73887b0aa1d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -25,8 +25,34 @@ #define __MXGPU_NV_H__ #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 -#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 -#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 +#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000 +#define NV_MAILBOX_POLL_MSG_REP_MAX 11 + +enum idh_request { + IDH_REQ_GPU_INIT_ACCESS = 1, + IDH_REL_GPU_INIT_ACCESS, + IDH_REQ_GPU_FINI_ACCESS, + IDH_REL_GPU_FINI_ACCESS, + IDH_REQ_GPU_RESET_ACCESS, + IDH_REQ_GPU_INIT_DATA, + + IDH_LOG_VF_ERROR = 200, + IDH_READY_TO_RESET = 201, +}; + +enum idh_event { + IDH_CLR_MSG_BUF = 0, + IDH_READY_TO_ACCESS_GPU, + IDH_FLR_NOTIFICATION, + IDH_FLR_NOTIFICATION_CMPL, + IDH_SUCCESS, + IDH_FAIL, + IDH_QUERY_ALIVE, + IDH_REQ_GPU_INIT_DATA_READY, + + IDH_TEXT_MESSAGE = 255, +}; extern const struct amdgpu_virt_ops xgpu_nv_virt_ops; @@ -35,7 +61,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev); int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev); void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev); -#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4) -#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1) +#define mmMAILBOX_CONTROL 0xE5E + +#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4) +#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1) + +#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56 +#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57 +#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58 +#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59 + +#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A +#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B +#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C +#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D + +#define mmMAILBOX_INT_CNTL 0xE5F #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index f13dc6cc158f..713ee66a4d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -43,7 +43,8 @@ enum idh_event { IDH_READY_TO_ACCESS_GPU, IDH_FLR_NOTIFICATION, IDH_FLR_NOTIFICATION_CMPL, - IDH_EVENT_MAX + + IDH_TEXT_MESSAGE = 255 }; extern const struct amdgpu_virt_ops xgpu_vi_virt_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 9af73567e716..1d8414c3fadb 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -32,45 +32,181 @@ #include "soc15_common.h" #include "navi10_ih.h" +#define MAX_REARM_RETRY 10 + +#define mmIH_CHICKEN_Sienna_Cichlid 0x018d +#define mmIH_CHICKEN_Sienna_Cichlid_BASE_IDX 0 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev); /** - * navi10_ih_enable_interrupts - Enable the interrupt ring buffer + * navi10_ih_init_register_offset - Initialize register 
offset for ih rings * * @adev: amdgpu_device pointer * - * Enable the interrupt ring buffer (NAVI10). + * Initialize register offset for ih rings (NAVI10). */ -static void navi10_ih_enable_interrupts(struct amdgpu_device *adev) +static void navi10_ih_init_register_offset(struct amdgpu_device *adev) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); + struct amdgpu_ih_regs *ih_regs; + + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; + } - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - adev->irq.ih.enabled = true; + if (adev->irq.ih1.ring_size) { + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; + } + + if (adev->irq.ih2.ring_size) { + ih_regs = &adev->irq.ih2.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2; + } } /** - * navi10_ih_disable_interrupts - Disable the interrupt ring buffer + * force_update_wptr_for_self_int - Force update the wptr for self interrupt * * @adev: amdgpu_device pointer + * @threshold: threshold to trigger the wptr reporting + * @timeout: timeout to trigger the wptr reporting + * @enabled: Enable/disable timeout flush mechanism * - * Disable the interrupt ring buffer (NAVI10). + * threshold input range: 0 ~ 15, default 0, + * real_threshold = 2^threshold + * timeout input range: 0 ~ 20, default 8, + * real_timeout = (2^timeout) * 1024 / (socclk_freq) + * + * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
*/ -static void navi10_ih_disable_interrupts(struct amdgpu_device *adev) +static void +force_update_wptr_for_self_int(struct amdgpu_device *adev, + u32 threshold, u32 timeout, bool enabled) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); + u32 ih_cntl, ih_rb_cntl; - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - adev->irq.ih.enabled = false; - adev->irq.ih.rptr = 0; + if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3)) + return; + + ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2); + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout); + ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2, + SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_USED_INT_THRESHOLD, threshold); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + } + + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, + RB_USED_INT_THRESHOLD, threshold); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + } + + WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl); +} + +/** + * navi10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * @enable: true - enable the interrupts, false - disable the interrupts + * + * Toggle the interrupt ring buffer (NAVI10) + */ +static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; + } + + return 0; +} + +/** + * navi10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (NAVI10).
+ */ +static int navi10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = navi10_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } + } + + return 0; } static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) @@ -96,6 +232,78 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl return ih_rb_cntl; } +static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +/** + * navi10_ih_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (NAVI10) + */ +static int navi10_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = navi10_ih_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) { + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + } + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, navi10_ih_doorbell_rptr(ih)); + + return 0; +} + /** * navi10_ih_irq_init - init and enable the interrupt ring * @@ -109,61 +317,51 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl */ static int navi10_ih_irq_init(struct amdgpu_device *adev) { - struct amdgpu_ih_ring *ih = &adev->irq.ih; - int ret = 0; - u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken; + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + u32 ih_chicken; u32 tmp; + int ret; + int i; /* disable irqs */ - navi10_ih_disable_interrupts(adev); + ret = navi10_ih_toggle_interrupts(adev, false); + if (ret) + return ret; adev->nbio.funcs->ih_control(adev); - /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, - !!adev->irq.msi_enabled); - if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { - if (ih->use_bus_addr) { - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); - ih_chicken = REG_SET_FIELD(ih_chicken, - IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + if (ih[0]->use_bus_addr) { + switch (adev->ip_versions[OSSSYS_HWIP][0]) { + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 1): + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid); + ih_chicken = REG_SET_FIELD(ih_chicken, + IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid, ih_chicken); + break; + default: + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + ih_chicken = REG_SET_FIELD(ih_chicken, + IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + break; + } } } - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); - - /* set the writeback address whether it's enabled or not */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, - lower_32_bits(ih->wptr_addr)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, - upper_32_bits(ih->wptr_addr) & 0xFFFF); - - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - - ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR); - if (ih->use_doorbell) { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, OFFSET, - ih->doorbell_index); - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, ENABLE, 1); - } else { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, - IH_DOORBELL_RPTR, ENABLE, 0); + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = navi10_ih_enable_ring(adev, ih[i]); + if (ret) + return ret; + } } - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); - adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, - ih->doorbell_index); + /* update doorbell range for ih ring 0*/ + adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, + ih[0]->doorbell_index); tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, @@ -177,9 +375,16 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) pci_set_master(adev->pdev); /* enable interrupts */ - navi10_ih_enable_interrupts(adev); + ret = navi10_ih_toggle_interrupts(adev, true); + if (ret) + return ret; + /* enable wptr force update for self int */ + force_update_wptr_for_self_int(adev, 0, 8, true); - return ret; + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + + return 0; } /** @@ -191,7 +396,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) */ static void navi10_ih_irq_disable(struct amdgpu_device *adev) { - navi10_ih_disable_interrupts(adev); + force_update_wptr_for_self_int(adev, 0, 8, false); + navi10_ih_toggle_interrupts(adev, false); /* Wait and acknowledge irq */ mdelay(1); @@ -201,6 +407,7 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev) * navi10_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring 
buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (NAVI10). Also check for @@ -210,15 +417,16 @@ static void navi10_ih_irq_disable(struct amdgpu_device *adev) static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - u32 wptr, reg, tmp; + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; wptr = le32_to_cpu(*ih->wptr_cpu); + ih_regs = &ih->ih_regs; if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); - wptr = RREG32_NO_KIQ(reg); + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); @@ -233,55 +441,37 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, wptr, ih->rptr, tmp); ih->rptr = tmp; - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); - tmp = RREG32_NO_KIQ(reg); + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32_NO_KIQ(reg, tmp); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } /** - * navi10_ih_decode_iv - decode an interrupt vector + * navi10_ih_irq_rearm - rearm IRQ if lost * * @adev: amdgpu_device pointer + * @ih: IH ring to match * - * Decodes the interrupt vector at the current rptr - * position and also advance the position. */ -static void navi10_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - struct amdgpu_iv_entry *entry) +static void navi10_ih_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - /* wptr/rptr are in bytes! */ - u32 ring_index = ih->rptr >> 2; - uint32_t dw[8]; - - dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); - dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); - dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); - dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); - dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); - dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); - dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); - dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); - - entry->client_id = dw[0] & 0xff; - entry->src_id = (dw[0] >> 8) & 0xff; - entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vmid = (dw[0] >> 24) & 0xf; - entry->vmid_src = (dw[0] >> 31); - entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); - entry->timestamp_src = dw[2] >> 31; - entry->pasid = dw[3] & 0xffff; - entry->pasid_src = dw[3] >> 31; - entry->src_data[0] = dw[4]; - entry->src_data[1] = dw[5]; - entry->src_data[2] = dw[6]; - entry->src_data[3] = dw[7]; - - /* wptr/rptr are in bytes! */ - ih->rptr += 32; + uint32_t v = 0; + uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; + + ih_regs = &ih->ih_regs; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } } /** @@ -289,17 +479,64 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * + * @ih: IH ring buffer to set rptr * Set the IH ring buffer rptr. 
*/ static void navi10_ih_set_rptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + struct amdgpu_ih_regs *ih_regs; + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; WDOORBELL32(ih->doorbell_index, ih->rptr); - } else - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + navi10_ih_irq_rearm(adev, ih); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); + } +} + +/** + * navi10_ih_self_irq - dispatch work for ring 1 and 2 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. + */ +static int navi10_ih_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t wptr = cpu_to_le32(entry->src_data[0]); + + switch (entry->ring_id) { + case 1: + *adev->irq.ih1.wptr_cpu = wptr; + schedule_work(&adev->irq.ih1_work); + break; + case 2: + *adev->irq.ih2.wptr_cpu = wptr; + schedule_work(&adev->irq.ih2_work); + break; + default: break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = { + .process = navi10_ih_self_irq, +}; + +static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs; } static int navi10_ih_early_init(void *handle) @@ -307,6 +544,7 @@ static int navi10_ih_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_set_interrupt_funcs(adev); + navi10_ih_set_self_irq_funcs(adev); return 0; } @@ -316,11 +554,20 @@ static int navi10_ih_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool use_bus_addr; + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + + if (r) + return r; + /* use gpu virtual address for ih ring * until ih_checken is programmed to allow * use bus address for ih ring by psp bl */ - use_bus_addr = - (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true; + if ((adev->flags & AMD_IS_APU) || + (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + use_bus_addr = false; + else + use_bus_addr = true; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr); if (r) return r; @@ -328,6 +575,16 @@ static int navi10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; + adev->irq.ih1.ring_size = 0; + adev->irq.ih2.ring_size = 0; + + /* initialize ih control registers offset */ + navi10_ih_init_register_offset(adev); + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + if (r) + return r; + r = amdgpu_irq_init(adev); return r; @@ -337,8 +594,7 @@ static int navi10_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); return 0; } @@ -427,7 +683,7 @@ static int navi10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } @@ -467,7 +723,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { static const struct amdgpu_ih_funcs navi10_ih_funcs = { .get_wptr = navi10_ih_get_wptr, - .decode_iv = navi10_ih_decode_iv, + .decode_iv = amdgpu_ih_decode_iv_helper, .set_rptr = navi10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h index 074a9a09c0a7..a5b60c9a2418 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h +++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h @@ -73,6 +73,22 @@ #define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_BARRIER_OR 0 +#define SDMA_GCR_RANGE_IS_PA (1 << 18) +#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) +#define SDMA_GCR_GL2_WB (1 << 15) +#define SDMA_GCR_GL2_INV (1 << 14) +#define SDMA_GCR_GL2_DISCARD (1 << 13) +#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) +#define SDMA_GCR_GL2_US (1 << 10) +#define SDMA_GCR_GL1_INV (1 << 9) +#define SDMA_GCR_GLV_INV (1 << 8) +#define SDMA_GCR_GLK_INV (1 << 7) +#define SDMA_GCR_GLK_WB (1 << 6) +#define SDMA_GCR_GLM_INV (1 << 5) +#define SDMA_GCR_GLM_WB (1 << 4) +#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) +#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) + /*define for op field*/ #define SDMA_PKT_HEADER_op_offset 0 #define SDMA_PKT_HEADER_op_mask 0x000000FF diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c deleted file mode 100644 index 4ea1e8fbb601..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi14_ip_offset.h" - -int navi14_reg_base_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index f3a3fe746222..4ecd2b5808ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -28,11 +28,40 @@ #include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> +#include <linux/pci.h> #define smnPCIE_CONFIG_CNTL 0x11180044 #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 - +#define smnPCIE_LC_CNTL 0x11140280 +#define smnPCIE_LC_CNTL3 0x111402d4 +#define smnPCIE_LC_CNTL6 0x111402ec +#define smnPCIE_LC_CNTL7 0x111402f0 +#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c +#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538 +#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 +#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c + +#define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6 +#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_SDMA3_DOORBELL_RANGE 0x01d7 +#define mmBIF_SDMA3_DOORBELL_RANGE_BASE_IDX 2 + +#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01d8 +#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2 + +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 + +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. 
Firmware uses this bit internally */ +#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) { @@ -44,8 +73,17 @@ static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + u32 tmp; + /* + * guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0, + * therefore we force rev_id to 0 (which is the default value) + */ + if (amdgpu_sriov_vf(adev)) { + return 0; + } + + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -62,15 +100,6 @@ static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); @@ -81,7 +110,9 @@ static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instan int doorbell_size) { u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : - SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); + instance == 1 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE) : + instance == 2 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA2_DOORBELL_RANGE) : + SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA3_DOORBELL_RANGE); u32 doorbell_range = RREG32(reg); @@ -103,7 +134,8 @@ static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instan static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, int doorbell_index, int instance) { - u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); + u32 reg = instance ? 
SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE) : + SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); u32 doorbell_range = RREG32(reg); @@ -198,8 +230,11 @@ static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) + return; + def = data = RREG32_PCIE(smnCPM_CONTROL); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) { + if (enable) { data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | @@ -224,8 +259,11 @@ static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev { uint32_t def, data; + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + return; + def = data = RREG32_PCIE(smnPCIE_CNTL2); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { + if (enable) { data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); @@ -290,33 +328,235 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev) +const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = { + .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, +}; + +static void nbio_v2_3_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); +} + +#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 +#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms +#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms + +static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, + bool enable) { - uint32_t reg; + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); - reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + if (enable) { + /* Disable ASPM L0s/L1 first */ + data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK); - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + data |= 
NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + if (pci_is_thunderbolt_attached(adev->pdev)) + data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + else + data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + + data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + } else { + /* Disable ASPM L1 */ + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + /* Disable ASPM TxL0s */ + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + /* Disable ACPI L1 */ + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; } + + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); } -static void nbio_v2_3_init_registers(struct amdgpu_device *adev) +static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) { uint32_t def, data; - def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); - data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); - data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2); + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; if (def != data) - WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data); + + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; + if (def != data) + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); +} + +static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL7, data); + + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; + if (def != data) + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | 
PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL6, data); + + nbio_v2_3_program_ltr(adev); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); +} + +static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev) +{ + uint32_t reg_data = 0; + uint32_t link_width = 0; + + if (!((adev->asic_type >= CHIP_NAVI10) && + (adev->asic_type <= CHIP_NAVI12))) + return; + + reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL); + link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + + /* + * Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data) + * if link_width is 0x3 (x4) + */ + if (0x3 == link_width) { + reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6); + reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK; + reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT); + WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data); + } +} + +static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev) +{ + uint32_t reg_data = 0; + + if (adev->asic_type != CHIP_NAVI10) + return; + + reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL); + reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK; + WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data); +} + +static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev) +{ + uint32_t reg, reg_data; + + if (adev->asic_type != CHIP_SIENNA_CICHLID) + return; + + reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL); + + /* Clear Interrupt Status + */ + if ((reg & BIF_RB_CNTL__RB_ENABLE_MASK) == 0) { + reg = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (reg & BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK) { + reg_data = 1 << BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT; + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, reg_data); + } + } } const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { @@ -326,7 +566,6 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset, .get_rev_id = nbio_v2_3_get_rev_id, .mc_access_enable = nbio_v2_3_mc_access_enable, - .hdp_flush = nbio_v2_3_hdp_flush, .get_memsize = nbio_v2_3_get_memsize, .sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range, .vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range, @@ -338,6 +577,10 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_clockgating_state = 
nbio_v2_3_get_clockgating_state, .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, - .detect_hw_virt = nbio_v2_3_detect_hw_virt, .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, + .enable_aspm = nbio_v2_3_enable_aspm, + .program_aspm = nbio_v2_3_program_aspm, + .apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa, + .apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa, + .clear_doorbell_interrupt = nbio_v2_3_clear_doorbell_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index a43b60acf7f6..6074dd3a1ed8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -27,6 +27,7 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 635d9e1fc0a3..0d2d629e2d6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -29,10 +29,41 @@ #include "nbio/nbio_6_1_sh_mask.h" #include "nbio/nbio_6_1_smn.h" #include "vega10_enum.h" +#include <uapi/linux/kfd_ioctl.h> + +#define smnPCIE_LC_CNTL 0x11140280 +#define smnPCIE_LC_CNTL3 0x111402d4 +#define smnPCIE_LC_CNTL6 0x111402ec +#define smnPCIE_LC_CNTL7 0x111402f0 +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L +#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL +#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L +#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530 +#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c +#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 +#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 +#define smnRCC_BIF_STRAP2 0x10123488 +#define smnRCC_BIF_STRAP3 0x1012348c +#define smnRCC_BIF_STRAP5 0x10123494 +#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL +#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L +#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0 +#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10 +#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0 + +static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -50,18 +81,6 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, - mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, - 0); - else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0); -} - 
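Aside: every nbio_v*_hdp_flush callback deleted in this series implemented the same pattern, so one sketch covers them all. The helper name below is hypothetical; the register math is taken from the removed nbio_v2_3/v7_0/v7_4 callbacks above, which post a zero to the remapped HDP_MEM_FLUSH_CNTL either by a direct MMIO write or via a write packet on the ring (assumes the usual amdgpu.h and <uapi/linux/kfd_ioctl.h> context these files already include):

static void example_hdp_mem_flush(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	/* rmmio_remap.reg_offset is a byte offset into the register BAR;
	 * WREG32_NO_KIQ takes a dword register index, hence the >> 2 --
	 * the same arithmetic as the removed callbacks.
	 */
	u32 reg = (adev->rmmio_remap.reg_offset +
		   KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2;

	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ(reg, 0);			/* CPU-side flush */
	else
		amdgpu_ring_emit_wreg(ring, reg, 0);	/* flush from the ring */
}

Dropping .hdp_flush from the amdgpu_nbio_funcs tables works because nothing here is really nbio-specific: the logic depends only on the HDP remap window that remap_hdp_registers() programs, so it can live in the dedicated HDP IP code instead of being duplicated per nbio version.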
static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE); @@ -114,7 +133,7 @@ static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *ad static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, int doorbell_index) { - u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE); + u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE); if (use_doorbell) { ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); @@ -241,23 +260,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK }; -static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static void nbio_v6_1_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -276,6 +278,111 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) WREG32_PCIE(smnPCIE_CI_CNTL, data); } +static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) +{ + uint32_t def, data; + + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP2); + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP2, data); + + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; + if (def != data) + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); +} + +static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL7, data); + + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; + if (def != data) + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + 
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL6, data); + + nbio_v6_1_program_ltr(adev); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); +} + const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, @@ -283,7 +390,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset, .get_rev_id = nbio_v6_1_get_rev_id, .mc_access_enable = nbio_v6_1_mc_access_enable, - .hdp_flush = nbio_v6_1_hdp_flush, .get_memsize = nbio_v6_1_get_memsize, .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range, .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture, @@ -294,5 +400,6 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { .get_clockgating_state = nbio_v6_1_get_clockgating_state, .ih_control = nbio_v6_1_ih_control, .init_registers = nbio_v6_1_init_registers, - .detect_hw_virt = nbio_v6_1_detect_hw_virt, + .remap_hdp_registers = nbio_v6_1_remap_hdp_registers, + .program_aspm = nbio_v6_1_program_aspm, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index d6cbf26074bc..3c00666a13e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -43,7 +43,7 @@ static void nbio_v7_0_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -60,15 +60,6 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 
0); -} - static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE); @@ -126,7 +117,7 @@ static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *ad static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, int doorbell_index) { - u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE); + u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE); if (use_doorbell) { ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); @@ -280,12 +271,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; -static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev) -{ - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; -} - static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { @@ -298,7 +283,6 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset, .get_rev_id = nbio_v7_0_get_rev_id, .mc_access_enable = nbio_v7_0_mc_access_enable, - .hdp_flush = nbio_v7_0_hdp_flush, .get_memsize = nbio_v7_0_get_memsize, .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range, .vcn_doorbell_range = nbio_v7_0_vcn_doorbell_range, @@ -310,6 +294,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { .get_clockgating_state = nbio_v7_0_get_clockgating_state, .ih_control = nbio_v7_0_ih_control, .init_registers = nbio_v7_0_init_registers, - .detect_hw_virt = nbio_v7_0_detect_hw_virt, .remap_hdp_registers = nbio_v7_0_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c new file mode 100644 index 000000000000..8f2a315e7c73 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -0,0 +1,397 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "nbio_v7_2.h" + +#include "nbio/nbio_7_2_0_offset.h" +#include "nbio/nbio_7_2_0_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> + +#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC 0x0015 +#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC_BASE_IDX 2 +#define regBIF_BX0_BIF_FB_EN_YC 0x0100 +#define regBIF_BX0_BIF_FB_EN_YC_BASE_IDX 2 +#define regBIF1_PCIE_MST_CTRL_3 0x4601c6 +#define regBIF1_PCIE_MST_CTRL_3_BASE_IDX 5 +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT \ + 0x1b +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT \ + 0x1c +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK \ + 0x08000000L +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK \ + 0x30000000L +#define regBIF1_PCIE_TX_POWER_CTRL_1 0x460187 +#define regBIF1_PCIE_TX_POWER_CTRL_1_BASE_IDX 5 +#define BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L +#define BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L + +static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + +static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->asic_type == CHIP_YELLOW_CARP) + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC); + else + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + + tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; + tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; + + return tmp; +} + +static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + if (adev->asic_type == CHIP_YELLOW_CARP) + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, + BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | + BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); + else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, + BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | + BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); + else + if (adev->asic_type == CHIP_YELLOW_CARP) + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0); + else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); +} + +static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev) +{ + return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE); +} + +static void nbio_v7_2_sdma_doorbell_range(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, + int doorbell_size) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_SDMA0_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_SDMA0_DOORBELL_RANGE, + OFFSET, doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_SDMA0_DOORBELL_RANGE, + SIZE, doorbell_size); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_SDMA0_DOORBELL_RANGE, + SIZE, 0); + } + + WREG32_PCIE_PORT(reg, doorbell_range); +} + +static void nbio_v7_2_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index, int instance) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE); + u32 doorbell_range = RREG32_PCIE_PORT(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, 
OFFSET, + doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8); + } else { + doorbell_range = REG_SET_FIELD(doorbell_range, + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0); + } + + WREG32_PCIE_PORT(reg, doorbell_range); +} + +static void nbio_v7_2_enable_doorbell_aperture(struct amdgpu_device *adev, + bool enable) +{ + u32 reg; + + reg = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN); + reg = REG_SET_FIELD(reg, RCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, + BIF_DOORBELL_APER_EN, enable ? 1 : 0); + + WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, reg); +} + +static void nbio_v7_2_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = 0; + + if (enable) { + tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, + regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, + regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + tmp); +} + + +static void nbio_v7_2_ih_doorbell_range(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index) +{ + u32 ih_doorbell_range = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE)); + + if (use_doorbell) { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, OFFSET, + doorbell_index); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, SIZE, + 2); + } else { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, + GDC0_BIF_IH_DOORBELL_RANGE, SIZE, + 0); + } + + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE), + ih_doorbell_range); +} + +static void nbio_v7_2_ih_control(struct amdgpu_device *adev) +{ + u32 interrupt_cntl; + + /* setup interrupt control */ + WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, + adev->dummy_page_addr >> 8); + + interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL); + /* + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, + IH_DUMMY_RD_OVERRIDE, 0); + + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, + IH_REQ_NONSNOOP_EN, 0); + + WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl); +} + +static void nbio_v7_2_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL)); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) { + data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | + CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); + } else { + data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | + 
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | + CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | + CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); + } + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL), data); +} + +static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (adev->asic_type == CHIP_YELLOW_CARP) { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + else + data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); + + data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1)); + def = data; + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + else + data &= ~(BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1), + data); + } else { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + else + data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); + } +} + +static void nbio_v7_2_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_BIF_MGCG */ + data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL)); + if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_BIF_MGCG; + + /* AMD_CG_SUPPORT_BIF_LS */ + data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); + if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_BIF_LS; +} + +static u32 nbio_v7_2_get_hdp_flush_req_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ); +} + +static u32 nbio_v7_2_get_hdp_flush_done_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE); +} + +static u32 nbio_v7_2_get_pcie_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2); +} + +static u32 nbio_v7_2_get_pcie_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2); +} + +static u32 nbio_v7_2_get_pcie_port_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX); +} + +static u32 nbio_v7_2_get_pcie_port_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA); +} + +const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = { + .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = 
BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK, + .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK, +}; + +static void nbio_v7_2_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + if (adev->asic_type == CHIP_YELLOW_CARP) { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3)); + data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data); + } else { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, + CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, + CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); + } +} + +const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { + .get_hdp_flush_req_offset = nbio_v7_2_get_hdp_flush_req_offset, + .get_hdp_flush_done_offset = nbio_v7_2_get_hdp_flush_done_offset, + .get_pcie_index_offset = nbio_v7_2_get_pcie_index_offset, + .get_pcie_data_offset = nbio_v7_2_get_pcie_data_offset, + .get_pcie_port_index_offset = nbio_v7_2_get_pcie_port_index_offset, + .get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset, + .get_rev_id = nbio_v7_2_get_rev_id, + .mc_access_enable = nbio_v7_2_mc_access_enable, + .get_memsize = nbio_v7_2_get_memsize, + .sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range, + .vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range, + .enable_doorbell_aperture = nbio_v7_2_enable_doorbell_aperture, + .enable_doorbell_selfring_aperture = nbio_v7_2_enable_doorbell_selfring_aperture, + .ih_doorbell_range = nbio_v7_2_ih_doorbell_range, + .update_medium_grain_clock_gating = nbio_v7_2_update_medium_grain_clock_gating, + .update_medium_grain_light_sleep = nbio_v7_2_update_medium_grain_light_sleep, + .get_clockgating_state = nbio_v7_2_get_clockgating_state, + .ih_control = nbio_v7_2_ih_control, + .init_registers = nbio_v7_2_init_registers, + .remap_hdp_registers = nbio_v7_2_remap_hdp_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.h new file mode 100644 index 000000000000..a8e8e65648a0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.h @@ -0,0 +1,32 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NBIO_V7_2_H__ +#define __NBIO_V7_2_H__ + +#include "soc15_common.h" + +extern const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg; +extern const struct amdgpu_nbio_funcs nbio_v7_2_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 0db458f9fafc..b8bd03d16dba 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -31,18 +31,40 @@ #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include <uapi/linux/kfd_ioctl.h> +#define smnPCIE_LC_CNTL 0x11140280 +#define smnPCIE_LC_CNTL3 0x111402d4 +#define smnPCIE_LC_CNTL6 0x111402ec +#define smnPCIE_LC_CNTL7 0x111402f0 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c +#define smnRCC_BIF_STRAP3 0x1012348c +#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL +#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L +#define smnRCC_BIF_STRAP5 0x10123494 +#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL +#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c +#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 +#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 +#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538 +#define smnRCC_BIF_STRAP2 0x10123488 +#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L +#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0 +#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10 +#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0 /* * These are nbio v7_4_1 registers mask. Temporarily define these here since * nbio v7_4_1 header is incomplete. */ -#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. 
Firmware uses this bit internally */ #define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L #define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc #define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2 @@ -52,6 +74,31 @@ #define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL #define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L + +#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE 0x01d8 +#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX 2 +//BIF_MMSCH1_DOORBELL_ALDE_RANGE +#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT 0x2 +#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT 0x10 +#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK 0x00000FFCL +#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK 0x001F0000L + +#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015 +#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2 + +#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe +#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2 +#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18 +#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L + +#define mmBIF_INTR_CNTL_ALDE 0x0101 +#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2 + +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -62,7 +109,12 @@ static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + u32 tmp; + + if (adev->asic_type == CHIP_ALDEBARAN) + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE); + else + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -79,15 +131,6 @@ static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); } -static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else - amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); -} - static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev) { return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE); @@ -98,10 +141,10 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan { u32 reg, doorbell_range; - if (instance < 2) + if (instance < 2) { reg = instance + SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE); - else + } else { /* * These registers address of SDMA2~7 is not consecutive * from SDMA0~1. Need plus 4 dwords offset. 
@@ -109,9 +152,19 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0 * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4 * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8 + * BIF_SDMA4_DOORBELL_RANGE: + * ARCTURUS: 0x3be0 + * ALDEBARAN: 0x3be4 */ - reg = instance + 0x4 + - SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE); + if (adev->asic_type == CHIP_ALDEBARAN && instance == 4) + reg = instance + 0x4 + 0x1 + + SOC15_REG_OFFSET(NBIO, 0, + mmBIF_SDMA0_DOORBELL_RANGE); + else + reg = instance + 0x4 + + SOC15_REG_OFFSET(NBIO, 0, + mmBIF_SDMA0_DOORBELL_RANGE); + } doorbell_range = RREG32(reg); @@ -130,9 +183,12 @@ static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do u32 reg; u32 doorbell_range; - if (instance) - reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE); - else + if (instance) { + if (adev->asic_type == CHIP_ALDEBARAN) + reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE); + else + reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE); + } else reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); doorbell_range = RREG32(reg); @@ -182,7 +238,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, if (use_doorbell) { ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); - ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4); } else ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0); @@ -281,30 +337,28 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK, .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, - .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK, - .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, - .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, - .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, - .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, - .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, }; -static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) -{ - uint32_t reg; - - reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER); - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { + .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, +
.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, +}; static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { @@ -314,17 +368,59 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev) static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) { uint32_t bif_doorbell_intr_cntl; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (adev->asic_type == CHIP_ALDEBARAN) + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE); + else + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); - bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); if (REG_GET_FIELD(bif_doorbell_intr_cntl, BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { /* driver has to clear the interrupt status when bif ring is disabled */ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_CLEAR, 1); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl); + else + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + if (!ras->disable_ras_err_cnt_harvest) { + /* + * clear error status after ras_controller_intr + * according to hw team and count ue number + * for query + */ + nbio_v7_4_query_ras_error_count(adev, &err_data); + + /* logging on error cnt and printing for awareness */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + + if (err_data.ce_count) + dev_info(adev->dev, "%ld correctable hardware " + "errors detected in %s block, " + "no user action is needed.\n", + obj->err_data.ce_count, + get_ras_block_str(adev->nbio.ras_if)); + + if (err_data.ue_count) + dev_info(adev->dev, "%ld uncorrectable hardware " + "errors detected in %s block\n", + obj->err_data.ue_count, + get_ras_block_str(adev->nbio.ras_if)); + } - amdgpu_ras_global_ras_isr(adev); + dev_info(adev->dev, "RAS controller interrupt triggered " + "by NBIF error\n"); + + /* ras_controller_int is dedicated for nbif ras error, + * not the global interrupt for sync flood + */ + amdgpu_ras_reset_gpu(adev); } } @@ -332,14 +428,22 @@ static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_d { uint32_t bif_doorbell_intr_cntl; - bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (adev->asic_type == CHIP_ALDEBARAN) + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE); + else + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { /* driver has to clear the interrupt status when bif ring is disabled */ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl); + else + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); amdgpu_ras_global_ras_isr(adev); } @@ -357,14 
+461,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev, */ uint32_t bif_intr_cntl; - bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (adev->asic_type == CHIP_ALDEBARAN) + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE); + else + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { /* set interrupt vector select bit to 0 to select * vector 1 for bare metal case */ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, BIF_INTR_CNTL, RAS_INTR_VEC_SEL, 0); - WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl); + else + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } return 0; @@ -393,14 +506,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade */ uint32_t bif_intr_cntl; - bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (adev->asic_type == CHIP_ALDEBARAN) + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE); + else + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { /* set interrupt vector select bit to 0 to select * vector 1 for bare metal case */ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, BIF_INTR_CNTL, RAS_INTR_VEC_SEL, 0); + + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl); + else + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); } return 0; @@ -441,10 +562,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev) r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, &adev->nbio.ras_controller_irq); - if (r) - return r; - return 0; + return r; } static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev) @@ -461,25 +580,36 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, &adev->nbio.ras_err_event_athub_irq); - if (r) - return r; - return 0; + return r; } +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030 +#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020 + static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - uint32_t global_sts, central_sts, int_eoi; + uint32_t global_sts, central_sts, int_eoi, parity_sts; uint32_t corr, fatal, non_fatal; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + if (adev->asic_type == CHIP_ALDEBARAN) + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE); + else + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrNonFatal); + if (adev->asic_type == CHIP_ALDEBARAN) + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE); + else + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); + if (corr) err_data->ce_count++; if (fatal) @@ -487,8 +617,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, if (corr || fatal || non_fatal) { central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
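/*
 * Note on the pattern above: Aldebaran relocated several BIF registers,
 * so each access picks between the *_ALDE offset and the original one at
 * run time. A minimal sketch of how the repeated asic_type checks could
 * be folded into one helper (hypothetical, not part of this patch):
 *
 *	static u32 nbio_v7_4_bif_intr_cntl_offset(struct amdgpu_device *adev)
 *	{
 *		if (adev->asic_type == CHIP_ALDEBARAN)
 *			return SOC15_REG_OFFSET(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
 *		return SOC15_REG_OFFSET(NBIO, 0, mmBIF_INTR_CNTL);
 *	}
 *
 * Callers would then issue plain RREG32()/WREG32() on the returned offset
 * instead of duplicating every RREG32_SOC15()/WREG32_SOC15() pair.
 */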
+ /* clear error status register */ - WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts); + else + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + + if (fatal) + { + /* clear parity fatal error indication field */ + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts); + else + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts); + } if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, BIFL_RasContller_Intr_Recv)) { @@ -505,10 +648,129 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev, bool enable) { - WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL, + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE, + DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1); + else + WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL, DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1); } +const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = { + .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, + .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, + .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, + .query_ras_error_count = nbio_v7_4_query_ras_error_count, + .ras_late_init = amdgpu_nbio_ras_late_init, + .ras_fini = amdgpu_nbio_ras_fini, +}; + +static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) +{ + uint32_t def, data; + + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP2); + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP2, data); + + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; + if (def != data) + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); +} + +static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL7, data); + + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; + if (def != data) + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; + if 
(def != data) + WREG32_PCIE(smnRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL6, data); + + nbio_v7_4_program_ltr(adev); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP3, data); + + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_PCIE(smnRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); +} + const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, @@ -516,7 +778,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset, .get_rev_id = nbio_v7_4_get_rev_id, .mc_access_enable = nbio_v7_4_mc_access_enable, - .hdp_flush = nbio_v7_4_hdp_flush, .get_memsize = nbio_v7_4_get_memsize, .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range, .vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range, @@ -529,12 +790,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .get_clockgating_state = nbio_v7_4_get_clockgating_state, .ih_control = nbio_v7_4_ih_control, .init_registers = nbio_v7_4_init_registers, - .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, - .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, - .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, - .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, - .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, - .query_ras_error_count = nbio_v7_4_query_ras_error_count, - .ras_late_init = amdgpu_nbio_ras_late_init, + .program_aspm = nbio_v7_4_program_aspm, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index b1ac82872752..cc5692db6f98 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -27,6 +27,8 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald; extern const struct amdgpu_nbio_funcs 
nbio_v7_4_funcs; +extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 0ba66bef5746..febc903adf58 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -25,6 +25,8 @@ #include <linux/module.h> #include <linux/pci.h> +#include <drm/amdgpu_drm.h> + #include "amdgpu.h" #include "amdgpu_atombios.h" #include "amdgpu_ih.h" @@ -32,15 +34,12 @@ #include "amdgpu_vce.h" #include "amdgpu_ucode.h" #include "amdgpu_psp.h" -#include "amdgpu_smu.h" #include "atom.h" #include "amd_pcie.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "hdp/hdp_5_0_0_offset.h" -#include "hdp/hdp_5_0_0_sh_mask.h" -#include "smuio/smuio_11_0_0_offset.h" +#include "mp/mp_11_0_offset.h" #include "soc15.h" #include "soc15_common.h" @@ -48,44 +47,249 @@ #include "gfxhub_v2_0.h" #include "mmhub_v2_0.h" #include "nbio_v2_3.h" +#include "nbio_v7_2.h" +#include "hdp_v5_0.h" #include "nv.h" #include "navi10_ih.h" #include "gfx_v10_0.h" #include "sdma_v5_0.h" +#include "sdma_v5_2.h" #include "vcn_v2_0.h" -#include "dce_virtual.h" +#include "jpeg_v2_0.h" +#include "vcn_v3_0.h" +#include "jpeg_v3_0.h" +#include "amdgpu_vkms.h" #include "mes_v10_1.h" #include "mxgpu_nv.h" +#include "smuio_v11_0.h" +#include "smuio_v11_0_6.h" static const struct amd_ip_funcs nv_common_ip_funcs; +/* Navi */ +static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, +}; + +static const struct amdgpu_video_codecs nv_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(nv_video_codecs_encode_array), + .codec_array = nv_video_codecs_encode_array, +}; + +/* Navi1x */ +static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs nv_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(nv_video_codecs_decode_array), + .codec_array = nv_video_codecs_decode_array, +}; + +/* Sienna Cichlid */ +static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs sc_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array), + .codec_array = 
sc_video_codecs_decode_array, +}; + +/* SRIOV Sienna Cichlid, not const since data is controlled by host */ +static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, +}; + +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + .codec_array = sriov_sc_video_codecs_encode_array, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array), + .codec_array = sriov_sc_video_codecs_decode_array, +}; + +/* Beige Goby*/ +static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs bg_video_codecs_decode = { + .codec_count = ARRAY_SIZE(bg_video_codecs_decode_array), + .codec_array = bg_video_codecs_decode_array, +}; + +static const struct amdgpu_video_codecs bg_video_codecs_encode = { + .codec_count = 0, + .codec_array = NULL, +}; + +/* Yellow Carp*/ +static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, +}; + +static const struct amdgpu_video_codecs yc_video_codecs_decode = { + .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array), + .codec_array = yc_video_codecs_decode_array, +}; + +static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, + const struct amdgpu_video_codecs **codecs) +{ + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(3, 0, 0): + if (amdgpu_sriov_vf(adev)) { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode; + } else { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode; + } + return 0; + case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 0, 2): + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode; + return 0; + case IP_VERSION(3, 1, 1): + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &yc_video_codecs_decode; + return 0; + case IP_VERSION(3, 0, 33): + if (encode) + *codecs = &bg_video_codecs_encode; + else + *codecs = &bg_video_codecs_decode; + 
return 0; + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &nv_video_codecs_decode; + return 0; + default: + return -EINVAL; + } +} + /* * Indirect registers accessor */ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) { - unsigned long flags, address, data; - u32 r; + unsigned long address, data; + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + return amdgpu_device_indirect_rreg(adev, address, data, reg); +} + +static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long address, data; + + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + amdgpu_device_indirect_wreg(adev, address, data, reg, v); +} + +static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) +{ + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = adev->nbio.funcs->get_pcie_data_offset(adev); + return amdgpu_device_indirect_rreg64(adev, address, data, reg); +} + +static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags, address, data; + u32 r; + address = adev->nbio.funcs->get_pcie_port_index_offset(adev); + data = adev->nbio.funcs->get_pcie_port_data_offset(adev); + spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg); + WREG32(address, reg * 4); (void)RREG32(address); r = RREG32(data); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); return r; } -static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { - unsigned long flags, address, data; + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = adev->nbio.funcs->get_pcie_data_offset(adev); + amdgpu_device_indirect_wreg64(adev, address, data, reg, v); +} + +static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags, address, data; + + address = adev->nbio.funcs->get_pcie_port_index_offset(adev); + data = adev->nbio.funcs->get_pcie_port_data_offset(adev); + spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg); + WREG32(address, reg * 4); (void)RREG32(address); WREG32(data, v); (void)RREG32(data); @@ -140,7 +344,7 @@ void nv_grbm_select(struct amdgpu_device *adev, grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl); + WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl); } static void nv_vga_set_state(struct amdgpu_device *adev, bool state) @@ -159,6 +363,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + u32 rom_index_offset, rom_data_offset; if (bios == NULL) return false; @@ -171,11 +376,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + rom_index_offset = + adev->smuio.funcs->get_rom_index_offset(adev); + rom_data_offset = + adev->smuio.funcs->get_rom_data_offset(adev); + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } 
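A note on the accessor rework above: nv_pcie_rreg()/nv_pcie_wreg() and the new 64-bit variants now delegate to the common amdgpu_device_indirect_rreg()/_wreg() helpers instead of open-coding the index/data sequence; only the PCIe-port accessors keep the explicit form, because their index register is programmed in dwords (hence the reg * 4). For reference, a minimal sketch of the underlying indirect-access pattern, modeled on the open-coded port accessor above (the helper name is illustrative, not a real amdgpu symbol):

static u32 indirect_rreg_sketch(struct amdgpu_device *adev,
				u32 index_reg, u32 data_reg, u32 reg_addr)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* select the target register through the index register */
	WREG32(index_reg, reg_addr);
	/* read back so the index write posts before the data access */
	(void)RREG32(index_reg);
	r = RREG32(data_reg);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}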
@@ -187,10 +397,8 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)}, { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)}, { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)}, -#if 0 /* TODO: will set it when SDMA header is available */ { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)}, { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)}, -#endif { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)}, @@ -243,7 +451,8 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) { en = &nv_allowed_read_registers[i]; - if (reg_offset != + if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */ + reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; @@ -255,50 +464,23 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -#if 0 -static void nv_gpu_pci_config_reset(struct amdgpu_device *adev) -{ - u32 i; - - dev_info(adev->dev, "GPU pci config reset\n"); - - /* disable BM */ - pci_clear_master(adev->pdev); - /* reset */ - amdgpu_pci_config_reset(adev); - - udelay(100); - - /* wait for asic to come out of reset */ - for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = nbio_v2_3_get_memsize(adev); - if (memsize != 0xffffffff) - break; - udelay(1); - } - -} -#endif - -static int nv_asic_mode1_reset(struct amdgpu_device *adev) +static int nv_asic_mode2_reset(struct amdgpu_device *adev) { u32 i; int ret = 0; amdgpu_atombios_scratch_regs_engine_hung(adev, true); - dev_info(adev->dev, "GPU mode1 reset\n"); - /* disable BM */ pci_clear_master(adev->pdev); - pci_save_state(adev->pdev); + amdgpu_device_cache_pci_state(adev->pdev); - ret = psp_gpu_reset(adev); + ret = amdgpu_dpm_mode2_reset(adev); if (ret) - dev_err(adev->dev, "GPU mode1 reset failed\n"); + dev_err(adev->dev, "GPU mode2 reset failed\n"); - pci_restore_state(adev->pdev); + amdgpu_device_load_pci_state(adev->pdev); /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { @@ -317,36 +499,55 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) static enum amd_reset_method nv_asic_reset_method(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; - - if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu)) - return AMD_RESET_METHOD_BACO; - else + if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || + amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || + amdgpu_reset_method == AMD_RESET_METHOD_BACO || + amdgpu_reset_method == AMD_RESET_METHOD_PCI) + return amdgpu_reset_method; + + if (amdgpu_reset_method != -1) + dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", + amdgpu_reset_method); + + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 5, 0): + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): + return AMD_RESET_METHOD_MODE2; + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): return AMD_RESET_METHOD_MODE1; + default: + if (amdgpu_dpm_is_baco_supported(adev)) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_MODE1; + } } static int nv_asic_reset(struct amdgpu_device *adev) { - - /* FIXME: it doesn't work since vega10 */ -#if 0 - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - nv_gpu_pci_config_reset(adev); - - 
amdgpu_atombios_scratch_regs_engine_hung(adev, false); -#endif int ret = 0; - struct smu_context *smu = &adev->smu; - if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); - ret = smu_baco_reset(smu); - } else { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); - ret = nv_asic_mode1_reset(adev); + switch (nv_asic_reset_method(adev)) { + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + ret = amdgpu_device_pci_reset(adev); + break; + case AMD_RESET_METHOD_BACO: + dev_info(adev->dev, "BACO reset\n"); + ret = amdgpu_dpm_baco_reset(adev); + break; + case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); + ret = nv_asic_mode2_reset(adev); + break; + default: + dev_info(adev->dev, "MODE1 reset\n"); + ret = amdgpu_device_mode1_reset(adev); + break; } return ret; @@ -381,11 +582,13 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) static void nv_program_aspm(struct amdgpu_device *adev) { - - if (amdgpu_aspm == 0) + if (!amdgpu_aspm) return; - /* todo */ + if (!(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->program_aspm)) + adev->nbio.funcs->program_aspm(adev); + } static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, @@ -395,7 +598,7 @@ static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } -static const struct amdgpu_ip_block_version nv_common_ip_block = +const struct amdgpu_ip_block_version nv_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, @@ -404,107 +607,9 @@ static const struct amdgpu_ip_block_version nv_common_ip_block = .funcs = &nv_common_ip_funcs, }; -static int nv_reg_base_init(struct amdgpu_device *adev) +void nv_set_virt_ops(struct amdgpu_device *adev) { - int r; - - if (amdgpu_discovery) { - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - DRM_WARN("failed to init reg base from ip discovery table, " - "fallback to legacy init method\n"); - goto legacy_init; - } - - return 0; - } - -legacy_init: - switch (adev->asic_type) { - case CHIP_NAVI10: - navi10_reg_base_init(adev); - break; - case CHIP_NAVI14: - navi14_reg_base_init(adev); - break; - case CHIP_NAVI12: - navi12_reg_base_init(adev); - break; - default: - return -EINVAL; - } - - return 0; -} - -int nv_set_ip_blocks(struct amdgpu_device *adev) -{ - int r; - - /* Set IP register base before any HW register access */ - r = nv_reg_base_init(adev); - if (r) - return r; - - adev->nbio.funcs = &nbio_v2_3_funcs; - adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - - adev->nbio.funcs->detect_hw_virt(adev); - - if (amdgpu_sriov_vf(adev)) - adev->virt.ops = &xgpu_nv_virt_ops; - - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if 
(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - if (adev->enable_mes) - amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); - break; - case CHIP_NAVI12: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - break; - default: - return -EINVAL; - } - - return 0; + adev->virt.ops = &xgpu_nv_virt_ops; } static uint32_t nv_get_rev_id(struct amdgpu_device *adev) @@ -512,37 +617,13 @@ static uint32_t nv_get_rev_id(struct amdgpu_device *adev) return adev->nbio.funcs->get_rev_id(adev); } -static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) -{ - adev->nbio.funcs->hdp_flush(adev, ring); -} - -static void nv_invalidate_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) { - WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - } else { - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); - } -} - static bool nv_need_full_reset(struct amdgpu_device *adev) { return true; } -static void nv_get_pcie_usage(struct amdgpu_device *adev, - uint64_t *count0, - uint64_t *count1) -{ - /*TODO*/ -} - static bool nv_need_reset_on_init(struct amdgpu_device *adev) { -#if 0 u32 sol_reg; if (adev->flags & AMD_IS_APU) @@ -554,8 +635,7 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev) sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); if (sol_reg) return true; -#endif - /* TODO: re-enable it when mode1 reset is functional */ + return false; } @@ -584,8 +664,11 @@ static void nv_init_doorbell_index(struct amdgpu_device *adev) adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END; adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0; adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1; + adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING; adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0; adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2; + adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3; adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH; adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1; adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3; @@ -598,6 +681,28 @@ static void nv_init_doorbell_index(struct amdgpu_device *adev) 
adev->doorbell_index.sdma_doorbell_range = 20; } +static void nv_pre_asic_init(struct amdgpu_device *adev) +{ +} + +static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, + bool enter) +{ + if (enter) + amdgpu_gfx_rlc_enter_safe_mode(adev); + else + amdgpu_gfx_rlc_exit_safe_mode(adev); + + if (adev->gfx.funcs->update_perfmon_mgcg) + adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); + + if (!(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->enable_aspm)) + adev->nbio.funcs->enable_aspm(adev, !enter); + + return 0; +} + static const struct amdgpu_asic_funcs nv_asic_funcs = { .read_disabled_bios = &nv_read_disabled_bios, @@ -610,13 +715,14 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .set_uvd_clocks = &nv_set_uvd_clocks, .set_vce_clocks = &nv_set_vce_clocks, .get_config_memsize = &nv_get_config_memsize, - .flush_hdp = &nv_flush_hdp, - .invalidate_hdp = &nv_invalidate_hdp, .init_doorbell_index = &nv_init_doorbell_index, .need_full_reset = &nv_need_full_reset, - .get_pcie_usage = &nv_get_pcie_usage, .need_reset_on_init = &nv_need_reset_on_init, .get_pcie_replay_count = &nv_get_pcie_replay_count, + .supports_baco = &amdgpu_dpm_is_baco_supported, + .pre_asic_init = &nv_pre_asic_init, + .update_umd_stable_pstate = &nv_update_umd_stable_pstate, + .query_video_codecs = &nv_query_video_codecs, }; static int nv_common_early_init(void *handle) @@ -630,6 +736,10 @@ static int nv_common_early_init(void *handle) adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; adev->pcie_wreg = &nv_pcie_wreg; + adev->pcie_rreg64 = &nv_pcie_rreg64; + adev->pcie_wreg64 = &nv_pcie_wreg64; + adev->pciep_rreg = &nv_pcie_port_rreg; + adev->pciep_wreg = &nv_pcie_port_wreg; /* TODO: will add them during VCN v2 implementation */ adev->uvd_ctx_rreg = NULL; @@ -642,8 +752,11 @@ static int nv_common_early_init(void *handle) adev->rev_id = nv_get_rev_id(adev); adev->external_rev_id = 0xff; - switch (adev->asic_type) { - case CHIP_NAVI10: + /* TODO: split the GC and PG flags based on the relevant IP version for which + * they are relevant. 
+ */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_IH_CG | @@ -656,14 +769,16 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; - case CHIP_NAVI14: + case IP_VERSION(10, 1, 1): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_IH_CG | @@ -676,13 +791,15 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 20; break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CGCG | @@ -697,17 +814,166 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | - AMD_CG_SUPPORT_VCN_MGCG; + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; + /* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0, + * as a consequence, the rev_id and external_rev_id are wrong. + * workaround it by hardcoding rev_id to 0 (default value). + */ + if (amdgpu_sriov_vf(adev)) + adev->rev_id = 0; adev->external_rev_id = adev->rev_id + 0xa; break; + case IP_VERSION(10, 3, 0): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_MC_LS; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_ATHUB | + AMD_PG_SUPPORT_MMHUB; + if (amdgpu_sriov_vf(adev)) { + /* hypervisor control CG and PG enablement */ + adev->cg_flags = 0; + adev->pg_flags = 0; + } + adev->external_rev_id = adev->rev_id + 0x28; + break; + case IP_VERSION(10, 3, 2): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_IH_CG; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_ATHUB | + AMD_PG_SUPPORT_MMHUB; + adev->external_rev_id = adev->rev_id + 0x32; + break; + case IP_VERSION(10, 3, 1): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_GFX_FGCG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = 
AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG; + if (adev->apu_flags & AMD_APU_IS_VANGOGH) + adev->external_rev_id = adev->rev_id + 0x01; + break; + case IP_VERSION(10, 3, 4): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_IH_CG; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_ATHUB | + AMD_PG_SUPPORT_MMHUB; + adev->external_rev_id = adev->rev_id + 0x3c; + break; + case IP_VERSION(10, 3, 5): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_ATHUB | + AMD_PG_SUPPORT_MMHUB; + adev->external_rev_id = adev->rev_id + 0x46; + break; + case IP_VERSION(10, 3, 3): + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_FGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG; + if (adev->pdev->device == 0x1681) + adev->external_rev_id = 0x20; + else + adev->external_rev_id = adev->rev_id + 0x01; + break; + case IP_VERSION(10, 1, 3): + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x82; + break; default: /* FIXME: not supported yet */ return -EINVAL; } + if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) + adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG); + if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_setting(adev); xgpu_nv_mailbox_set_irq_funcs(adev); @@ -720,8 +986,12 @@ static int nv_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array)); + } return 0; } @@ -745,6 +1015,12 @@ static int nv_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->nbio.funcs->apply_lc_spc_mode_wa) + adev->nbio.funcs->apply_lc_spc_mode_wa(adev); + + if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa) + adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev); + /* enable pcie gen2/3 link */ nv_pcie_gen3_enable(adev); /* enable aspm */ @@ -802,110 +1078,6 @@ static int nv_common_soft_reset(void *handle) return 0; } -static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t 
hdp_clk_cntl, hdp_clk_cntl1; - uint32_t hdp_mem_pwr_cntl; - - if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_HDP_DS | - AMD_CG_SUPPORT_HDP_SD))) - return; - - hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); - - /* Before doing clock/power mode switch, - * forced on IPH & RC clock */ - hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, - IPH_MEM_CLK_SOFT_OVERRIDE, 1); - hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, - RC_MEM_CLK_SOFT_OVERRIDE, 1); - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); - - /* HDP 5.0 doesn't support dynamic power mode switch, - * disable clock and power gating before any changing */ - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_CTRL_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_LS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_DS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_SD_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_CTRL_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_LS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, 0); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL, - RC_MEM_POWER_SD_EN, 0); - WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); - - /* only one clock gating mode (LS/DS/SD) can be enabled */ - if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_LS_EN, enable); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_LS_EN, enable); - } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_DS_EN, enable); - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, enable); - } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) { - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - IPH_MEM_POWER_SD_EN, enable); - /* RC should not use shut down mode, fallback to ds */ - hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, - HDP_MEM_POWER_CTRL, - RC_MEM_POWER_DS_EN, enable); - } - - WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl); - - /* restore IPH & RC clock override after clock/power mode changing */ - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1); -} - -static void nv_update_hdp_clock_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t hdp_clk_cntl; - - if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) - return; - - hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - - if (enable) { - hdp_clk_cntl &= - ~(uint32_t) - (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK); - } else { - hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - 
HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK; - } - - WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl); -} - static int nv_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -914,18 +1086,22 @@ static int nv_common_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 3, 1): + case IP_VERSION(2, 3, 2): + case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): + case IP_VERSION(3, 3, 2): + case IP_VERSION(3, 3, 3): adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); - nv_update_hdp_mem_power_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); - nv_update_hdp_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); + adev->hdp.funcs->update_clock_gating(adev, + state == AMD_CG_STATE_GATE); + adev->smuio.funcs->update_rom_clock_gating(adev, + state == AMD_CG_STATE_GATE); break; default: break; @@ -943,31 +1119,15 @@ static int nv_common_set_powergating_state(void *handle, static void nv_common_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - uint32_t tmp; if (amdgpu_sriov_vf(adev)) *flags = 0; adev->nbio.funcs->get_clockgating_state(adev, flags); - /* AMD_CG_SUPPORT_HDP_MGCG */ - tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); - if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK | - HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK))) - *flags |= AMD_CG_SUPPORT_HDP_MGCG; - - /* AMD_CG_SUPPORT_HDP_LS/DS/SD */ - tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL); - if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_LS; - else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_DS; - else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK) - *flags |= AMD_CG_SUPPORT_HDP_SD; + adev->hdp.funcs->get_clock_gating_state(adev, flags); + + adev->smuio.funcs->get_clock_gating_state(adev, flags); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 82e6cb432f3d..83e9782aef39 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -26,10 +26,10 @@ #include "nbio_v2_3.h" +extern const struct amdgpu_ip_block_version nv_common_ip_block; + void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); -int nv_set_ip_blocks(struct amdgpu_device *adev); -int navi10_reg_base_init(struct amdgpu_device *adev); -int navi14_reg_base_init(struct amdgpu_device *adev); -int navi12_reg_base_init(struct amdgpu_device *adev); +void nv_set_virt_ops(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h index 1de984647dbb..fd6b58243b03 100644 --- a/drivers/gpu/drm/amd/amdgpu/nvd.h +++ b/drivers/gpu/drm/amd/amdgpu/nvd.h @@ -256,6 +256,54 @@ #define PACKET3_BLK_CNTX_UPDATE 0x53 #define PACKET3_INCR_UPDT_STATE 0x55 #define PACKET3_ACQUIRE_MEM 0x58 
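The PACKET3_ACQUIRE_MEM_GCR_CNTL_* helpers added below pack per-cache invalidate/writeback controls into the final dword of an ACQUIRE_MEM packet (see the layout comment that follows). A sketch of how ring code might compose that dword from these macros, assuming a full GL2 writeback plus invalidate of the smaller caches (the field choices are illustrative, not taken from this patch):

	u32 gcr_cntl = PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |	/* 1 = ALL */
		       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
		       PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
		       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
		       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		       PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1);

The result is emitted as the GCR_CNTL dword after the COHER_* and POLL_INTERVAL dwords of the packet.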
+/* 1. HEADER + * 2. COHER_CNTL [30:0] + * 2.1 ENGINE_SEL [31:31] + * 3. COHER_SIZE [31:0] + * 4. COHER_SIZE_HI [7:0] + * 5. COHER_BASE_LO [31:0] + * 6. COHER_BASE_HI [23:0] + * 7. POLL_INTERVAL [15:0] + * 8. GCR_CNTL [18:0] + */ +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0) + /* + * 0:NOP + * 1:ALL + * 2:RANGE + * 3:FIRST_LAST + */ +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2) + /* + * 0:ALL + * 1:reserved + * 2:RANGE + * 3:FIRST_LAST + */ +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11) + /* + * 0:ALL + * 1:VOL + * 2:RANGE + * 3:FIRST_LAST + */ +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x) ((x) << 13) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15) +#define PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16) + /* + * 0: PARALLEL + * 1: FORWARD + * 2: REVERSE + */ +#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18) #define PACKET3_REWIND 0x59 #define PACKET3_INTERRUPT 0x5A #define PACKET3_GEN_PDEPTE 0x5B @@ -306,6 +354,7 @@ #define PACKET3_GET_LOD_STATS 0x8E #define PACKET3_DRAW_MULTI_PREAMBLE 0x8F #define PACKET3_FRAME_CONTROL 0x90 +# define FRAME_TMZ (1 << 0) # define FRAME_CMD(x) ((x) << 28) /* * x=0: tmz_begin diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h deleted file mode 100644 index 8463245f424f..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE.
- * - */ -#ifndef PP_SMC_H -#define PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 -#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 -#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02 - -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -#define PPSMC_Result_OK ((uint8_t)0x01) -#define PPSMC_Result_Failed ((uint8_t)0xFF) - -typedef uint8_t PPSMC_Result; - -#define PPSMC_MSG_Halt ((uint8_t)0x10) -#define PPSMC_MSG_Resume ((uint8_t)0x11) -#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16) -#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17) -#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20) -#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint8_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43) -#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52) -#define PPSMC_MSG_EnableCac ((uint8_t)0x53) -#define PPSMC_MSG_DisableCac ((uint8_t)0x54) -#define PPSMC_TDPClampingActive ((uint8_t)0x59) -#define PPSMC_TDPClampingInactive ((uint8_t)0x5A) -#define PPSMC_StartFanControl ((uint8_t)0x5B) -#define PPSMC_StopFanControl ((uint8_t)0x5C) -#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) -#define PPSMC_NoDisplay ((uint8_t)0x5D) -#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) -#define PPSMC_HasDisplay ((uint8_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) -#define PPSMC_MSG_EnableULV ((uint8_t)0x62) -#define PPSMC_MSG_DisableULV ((uint8_t)0x63) -#define PPSMC_MSG_EnterULV ((uint8_t)0x64) -#define PPSMC_MSG_ExitULV ((uint8_t)0x65) -#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A) -#define PPSMC_FlushDataCache ((uint8_t)0x80) -#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82) -#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83) -#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84) -#define PPSMC_MSG_EnableDTE ((uint8_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint8_t)0x88) -#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) -#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) -#define 
PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) - -/* CI/KV/KB */ -#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) -#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) -#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) -#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) -#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) -#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) -#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) -#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) -#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) -#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) -#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) -#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) -#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) -#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) -#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) -#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) -#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) -#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) -#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) -#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) -#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) -#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) -#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) -#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) -#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) -#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) -#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) -#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) -#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) -#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) -#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) -#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) -#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) -#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) -#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) -#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) -#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) -#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) -#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) -#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) -#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) -#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) -#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) -#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) -#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) -#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) -#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) -#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) - -#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) -#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) - -#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) -#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) - -/* TN */ -#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) -#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) -#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) -#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) -#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a) -#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) -#define PPSMC_MSG_VCEPowerON 
((uint32_t) 0x10f) -#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) -#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) -#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) -#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) -#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) -#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) - -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) -#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) -#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) - -typedef uint16_t PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 74a9fe8e0cfb..dd0dce254901 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -31,6 +31,9 @@ #define GFX_CMD_RESERVED_MASK 0x7FF00000 #define GFX_CMD_RESPONSE_MASK 0x80000000 +/* USBC PD FW version retrieval command */ +#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000 + /* TEE Gfx Command IDs for the register interface. * Command ID must be between 0x00010000 and 0x000F0000. */ @@ -44,7 +47,7 @@ enum psp_gfx_crtl_cmd_id GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */ GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */ - GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */ + GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */ GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */ @@ -80,36 +83,39 @@ struct psp_gfx_ctrl */ #define GFX_FLAG_RESPONSE 0x80000000 -/* Gbr IH registers ID */ -enum ih_reg_id { - IH_RB = 0, // IH_RB_CNTL - IH_RB_RNG1 = 1, // IH_RB_CNTL_RING1 - IH_RB_RNG2 = 2, // IH_RB_CNTL_RING2 +/* TEE Gfx Command IDs for the ring buffer interface. */ +enum psp_gfx_cmd_id +{ + GFX_CMD_ID_LOAD_TA = 0x00000001, /* load TA */ + GFX_CMD_ID_UNLOAD_TA = 0x00000002, /* unload TA */ + GFX_CMD_ID_INVOKE_CMD = 0x00000003, /* send command to TA */ + GFX_CMD_ID_LOAD_ASD = 0x00000004, /* load ASD Driver */ + GFX_CMD_ID_SETUP_TMR = 0x00000005, /* setup TMR region */ + GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */ + GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */ + GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */ + GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */ + GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */ + GFX_CMD_ID_PROG_REG = 0x0000000B, /* program regs */ + GFX_CMD_ID_GET_FW_ATTESTATION = 0x0000000F, /* Query GPUVA of the Fw Attestation DB */ + /* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */ + GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */ + GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */ + GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */ }; -/* Command to setup Gibraltar IH register */ -struct psp_gfx_cmd_gbr_ih_reg { - uint32_t reg_value; /* Value to be set to the IH_RB_CNTL... 
register*/ - enum ih_reg_id reg_id; /* ID of the register */ +/* PSP boot config sub-commands */ +enum psp_gfx_boot_config_cmd +{ + BOOTCFG_CMD_SET = 1, /* Set boot configuration settings */ + BOOTCFG_CMD_GET = 2, /* Get boot configuration settings */ + BOOTCFG_CMD_INVALIDATE = 3 /* Reset current boot configuration settings to VBIOS defaults */ }; -/* TEE Gfx Command IDs for the ring buffer interface. */ -enum psp_gfx_cmd_id +/* PSP boot config bitmask values */ +enum psp_gfx_boot_config { - GFX_CMD_ID_LOAD_TA = 0x00000001, /* load TA */ - GFX_CMD_ID_UNLOAD_TA = 0x00000002, /* unload TA */ - GFX_CMD_ID_INVOKE_CMD = 0x00000003, /* send command to TA */ - GFX_CMD_ID_LOAD_ASD = 0x00000004, /* load ASD Driver */ - GFX_CMD_ID_SETUP_TMR = 0x00000005, /* setup TMR region */ - GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */ - GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */ - GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */ - GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */ - GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */ - GFX_CMD_ID_PROG_REG = 0x0000000B, /* program regs */ - /* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */ - GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */ - GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */ + BOOT_CONFIG_GECC = 0x1, }; /* Command to load Trusted Application binary into PSP OS. */ @@ -178,10 +184,19 @@ struct psp_gfx_cmd_setup_tmr uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */ uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */ uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */ + union { + struct { + uint32_t sriov_enabled:1; /* whether the device runs under SR-IOV*/ + uint32_t virt_phy_addr:1; /* driver passes both virtual and physical address to PSP*/ + uint32_t reserved:30; + } bitfield; + uint32_t tmr_flags; + }; + uint32_t system_phy_addr_lo; /* bits [31:0] of system physical address of TMR buffer (must be 4 KB aligned) */ + uint32_t system_phy_addr_hi; /* bits [63:32] of system physical address of TMR buffer */ }; - /* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. 
*/ enum psp_gfx_fw_type { GFX_FW_TYPE_NONE = 0, /* */ @@ -210,7 +225,7 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_UVD1 = 23, /* UVD1 VG-20 */ GFX_FW_TYPE_TOC = 24, /* TOC NV-10 */ GFX_FW_TYPE_RLC_P = 25, /* RLC P NV */ - GFX_FW_TYPE_RLX6 = 26, /* RLX6 NV */ + GFX_FW_TYPE_RLC_IRAM = 26, /* RLC_IRAM NV */ GFX_FW_TYPE_GLOBAL_TAP_DELAYS = 27, /* GLOBAL TAP DELAYS NV */ GFX_FW_TYPE_SE0_TAP_DELAYS = 28, /* SE0 TAP DELAYS NV */ GFX_FW_TYPE_SE1_TAP_DELAYS = 29, /* SE1 TAP DELAYS NV */ @@ -232,7 +247,7 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_ACCUM_CTRL_RAM = 45, /* ACCUM CTRL RAM NV */ GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */ GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */ - GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */ + GFX_FW_TYPE_RLC_DRAM_BOOT = 48, /* RLC DRAM BOOT NV */ GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */ GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */ GFX_FW_TYPE_DMUB = 51, /* DMUB RN */ @@ -242,6 +257,8 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */ GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */ GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */ + GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */ + GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */ GFX_FW_TYPE_MAX }; @@ -279,6 +296,15 @@ struct psp_gfx_cmd_load_toc uint32_t toc_size; /* FW buffer size in bytes */ }; +/* Dynamic boot configuration */ +struct psp_gfx_cmd_boot_cfg +{ + uint32_t timestamp; /* calendar time as number of seconds */ + enum psp_gfx_boot_config_cmd sub_cmd; /* sub-command indicating how to process command data */ + uint32_t boot_config; /* dynamic boot configuration bitmask */ + uint32_t boot_config_valid; /* dynamic boot configuration valid bits bitmask */ +}; + /* All GFX ring buffer commands. */ union psp_gfx_commands { @@ -291,6 +317,31 @@ union psp_gfx_commands struct psp_gfx_cmd_reg_prog cmd_setup_reg_prog; struct psp_gfx_cmd_setup_tmr cmd_setup_vmr; struct psp_gfx_cmd_load_toc cmd_load_toc; + struct psp_gfx_cmd_boot_cfg boot_cfg; +}; + +struct psp_gfx_uresp_reserved +{ + uint32_t reserved[8]; +}; + +/* Command-specific response for Fw Attestation Db */ +struct psp_gfx_uresp_fwar_db_info +{ + uint32_t fwar_db_addr_lo; + uint32_t fwar_db_addr_hi; +}; + +/* Command-specific response for boot config. */ +struct psp_gfx_uresp_bootcfg { + uint32_t boot_cfg; /* boot config data */ +}; + +/* Union of command-specific responses for GPCOM ring. */ +union psp_gfx_uresp { + struct psp_gfx_uresp_reserved reserved; + struct psp_gfx_uresp_bootcfg boot_cfg; + struct psp_gfx_uresp_fwar_db_info fwar_db_info; }; /* Structure of GFX Response buffer. 
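The GFX_CMD_ID_BOOT_CFG command added above pairs the psp_gfx_cmd_boot_cfg request with the new command-specific response union, which reaches the driver through the uresp field added to psp_gfx_resp in the following hunk. A sketch of how a caller might query whether GDDR ECC is enabled, assuming only the structures defined in this file (the function name and the elided submission step are placeholders, not the driver's actual API):

static uint32_t example_query_gecc(union psp_gfx_commands *cmd,
				   struct psp_gfx_resp *resp)
{
	/* Request: ask the PSP for its current boot configuration. */
	cmd->boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	/* ... frame submitted on the GPCOM ring and response polled ... */

	/* Response: command-specific data arrives via the uresp union. */
	return resp->uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC;
}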
@@ -305,9 +356,11 @@ struct psp_gfx_resp uint32_t fw_addr_hi; /* +12 bits [63:32] of FW address within TMR (in response to cmd_load_ip_fw command) */ uint32_t tmr_size; /* +16 size of the TMR to be reserved including MM fw and Gfx fw in response to cmd_load_toc command */ - uint32_t reserved[3]; + uint32_t reserved[11]; + + union psp_gfx_uresp uresp; /* +64 response union containing command-specific responses */ - /* total 32 bytes */ + /* total 96 bytes */ }; /* Structure of Command buffer pointed by psp_gfx_rb_frame.cmd_buf_addr_hi @@ -361,4 +414,11 @@ struct psp_gfx_rb_frame /* total 64 bytes */ }; +#define PSP_ERR_UNKNOWN_COMMAND 0x00000100 + +enum tee_error_code { + TEE_SUCCESS = 0x00000000, + TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A, +}; + #endif /* _PSP_TEE_GFX_IF_H_ */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index b345e69ba246..ed2293686f0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -50,15 +50,14 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) const char *chip_name; char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_RAVEN: - if (adev->rev_id >= 0x8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) chip_name = "picasso"; else chip_name = "raven"; @@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + err = psp_init_asd_microcode(psp, chip_name); if (err) goto out; - err = amdgpu_ucode_validate(adev->psp.asd_fw); - if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -97,23 +84,31 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.ta_hdcp_ucode_version = - le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); - adev->psp.ta_hdcp_ucode_size = - le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); - adev->psp.ta_hdcp_start_addr = + adev->psp.hdcp_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + adev->psp.dtm_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->dtm.offset_bytes); + + 
adev->psp.securedisplay_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->securedisplay.fw_version); + adev->psp.securedisplay_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->securedisplay.size_bytes); + adev->psp.securedisplay_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->securedisplay.offset_bytes); - adev->psp.ta_dtm_ucode_version = - le32_to_cpu(ta_hdr->ta_dtm_ucode_version); - adev->psp.ta_dtm_ucode_size = - le32_to_cpu(ta_hdr->ta_dtm_size_bytes); - adev->psp.ta_dtm_start_addr = - (uint8_t *)adev->psp.ta_hdcp_start_addr + - le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); } return 0; @@ -126,8 +121,6 @@ out: dev_err(adev->dev, "psp v10.0: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; } return err; @@ -230,181 +223,24 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v10_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) +static int psp_v10_0_mode1_reset(struct psp_context *psp) { - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; + DRM_INFO("psp mode 1 reset not supported now! 
\n"); + return -EINVAL; } -static int -psp_v10_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) +static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp) { - int ret = 0; - - switch(ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } + struct amdgpu_device *adev = psp->adev; - return ret; + return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); } -static bool psp_v10_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) +static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value) { - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; struct amdgpu_device *adev = psp->adev; - err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (!ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - - -static int psp_v10_0_mode1_reset(struct psp_context *psp) -{ - DRM_INFO("psp mode 1 reset not supported now! 
\n"); - return -EINVAL; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } static const struct psp_funcs psp_v10_0_funcs = { @@ -413,9 +249,9 @@ static const struct psp_funcs psp_v10_0_funcs = { .ring_create = psp_v10_0_ring_create, .ring_stop = psp_v10_0_ring_stop, .ring_destroy = psp_v10_0_ring_destroy, - .cmd_submit = psp_v10_0_cmd_submit, - .compare_sram_data = psp_v10_0_compare_sram_data, .mode1_reset = psp_v10_0_mode1_reset, + .ring_get_wptr = psp_v10_0_ring_get_wptr, + .ring_set_wptr = psp_v10_0_ring_set_wptr, }; void psp_v10_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index ffeaa2f5588d..2176ef85f137 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -22,9 +22,12 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/vmalloc.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_psp.h" +#include "amdgpu_ras.h" #include "amdgpu_ucode.h" #include "soc15_common.h" #include "psp_v11_0.h" @@ -43,13 +46,26 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi10_ta.bin"); MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); MODULE_FIRMWARE("amdgpu/navi14_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi14_ta.bin"); MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi12_ta.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_asd.bin"); +MODULE_FIRMWARE("amdgpu/vangogh_toc.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sos.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_ta.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_sos.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -61,104 +77,67 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + +/* Read USB-PD from LFB */ +#define GFX_CMD_USB_PD_USE_LFB 0x480 + static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; const char *chip_name; - char fw_name[30]; + char fw_name[PSP_FW_NAME_LEN]; int err = 0; - const struct psp_firmware_header_v1_0 *sos_hdr; - const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; - const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; - const struct psp_firmware_header_v1_0 *asd_hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA20: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 2): chip_name = "vega20"; break; - case CHIP_NAVI10: + case IP_VERSION(11, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): chip_name = "navi12"; break; - case CHIP_ARCTURUS: + case IP_VERSION(11, 0, 4): chip_name = "arcturus"; break; + case IP_VERSION(11, 0, 7): + chip_name = "sienna_cichlid"; + break; 
+ case IP_VERSION(11, 0, 11): + chip_name = "navy_flounder"; + break; + case IP_VERSION(11, 5, 0): + chip_name = "vangogh"; + break; + case IP_VERSION(11, 0, 12): + chip_name = "dimgrey_cavefish"; + break; + case IP_VERSION(11, 0, 13): + chip_name = "beige_goby"; + break; default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); - err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.sos_fw); - if (err) - goto out; - - sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; - amdgpu_ucode_print_psp_hdr(&sos_hdr->header); - - switch (sos_hdr->header.header_version_major) { - case 1: - adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); - adev->psp.sys_start_addr = (uint8_t *)sos_hdr + - le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr->sos_offset_bytes); - if (sos_hdr->header.header_version_minor == 1) { - sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); - adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); - adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); - } - if (sos_hdr->header.header_version_minor == 2) { - sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); - adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); - } - break; - default: - dev_err(adev->dev, - "Unsupported psp sos firmware\n"); - err = -EINVAL; - goto out; - } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out1; - - err = amdgpu_ucode_validate(adev->psp.asd_fw); - if (err) - goto out1; - - asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)asd_hdr + - le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); - - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 4): + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_asd_microcode(psp, chip_name); + if (err) + return err; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -172,20 +151,86 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); - adev->psp.ta_xgmi_ucode_size = 
le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); - adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + + adev->psp.xgmi_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->xgmi.fw_version); + adev->psp.xgmi_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->xgmi.size_bytes); + adev->psp.xgmi_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version); - adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes); - adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr + - le32_to_cpu(ta_hdr->ta_ras_offset_bytes); + adev->psp.ras_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->ras.fw_version); + adev->psp.ras_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->ras.size_bytes); + adev->psp.ras_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->ras.offset_bytes); + } + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_asd_microcode(psp, chip_name); + if (err) + return err; + if (amdgpu_sriov_vf(adev)) + break; + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.hdcp_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu( + ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.dtm_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context + .bin_desc.start_addr + + le32_to_cpu(ta_hdr->dtm.offset_bytes); } break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(psp, chip_name); + if (err) + return err; + break; + case IP_VERSION(11, 5, 0): + err = psp_init_asd_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_toc_microcode(psp, chip_name); + if (err) + return err; break; default: BUG(); @@ -196,18 +241,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) out2: release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; -out1: - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; -out: - dev_err(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - return err; } +static int 
psp_v11_0_wait_for_bootloader(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + int ret; + int retry_loop; + + for (retry_loop = 0; retry_loop < 10; retry_loop++) { + /* Wait for bootloader to signify that is + ready having bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, + 0x80000000, + false); + + if (ret == 0) + return 0; + } + + return ret; +} + static bool psp_v11_0_is_sos_alive(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -227,22 +286,15 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) /* Check tOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - if (psp_v11_0_is_sos_alive(psp)) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); + if (psp_v11_0_is_sos_alive(psp)) return 0; - } - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP KDB binary to memory */ - memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size); + psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes); /* Provide the PSP KDB to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -251,9 +303,38 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, psp_gfxdrv_command_reg); - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); + + return ret; +} + +static int psp_v11_0_bootloader_load_spl(struct psp_context *psp) +{ + int ret; + uint32_t psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + + /* Check tOS sign of life register to confirm sys driver and sOS + * are already been loaded. + */ + if (psp_v11_0_is_sos_alive(psp)) + return 0; + + ret = psp_v11_0_wait_for_bootloader(psp); + if (ret) + return ret; + + /* Copy PSP SPL binary to memory */ + psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes); + + /* Provide the PSP SPL to bootloader */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = PSP_BL__LOAD_TOS_SPL_TABLE; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + ret = psp_v11_0_wait_for_bootloader(psp); return ret; } @@ -267,22 +348,15 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
*/ - if (psp_v11_0_is_sos_alive(psp)) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); + if (psp_v11_0_is_sos_alive(psp)) return 0; - } - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -294,8 +368,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); return ret; } @@ -312,16 +385,12 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) if (psp_v11_0_is_sos_alive(psp)) return 0; - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_v11_0_wait_for_bootloader(psp); if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -339,37 +408,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) return ret; } -static void psp_v11_0_reroute_ih(struct psp_context *psp) -{ - struct amdgpu_device *adev = psp->adev; - uint32_t tmp; - - /* Change IH ring for VMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); - - /* Change IH ring for UMC */ - tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); - tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); - - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET); - - mdelay(20); - psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); -} - static int psp_v11_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -377,8 +415,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp, struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; - psp_v11_0_reroute_ih(psp); - ring = &psp->km_ring; ring->ring_type = ring_type; @@ -398,13 +434,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp, return 0; } -static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 
0x80045) - return true; - return false; -} - static int psp_v11_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -412,7 +441,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); else @@ -423,7 +452,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 0x80000000, 0x80000000, false); else @@ -441,7 +470,8 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - if (psp_v11_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { + ring->ring_wptr = 0; ret = psp_v11_0_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); @@ -519,195 +549,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v11_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v11_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v11_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - 
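The cmd_submit implementation deleted above is part of a refactor: the ring-frame bookkeeping moves into common PSP code, and each ASIC now supplies only the ring_get_wptr/ring_set_wptr hooks added later in this file's diff. A simplified sketch of what the shared submit path looks like after the change, assuming the psp_funcs hooks shown in this patch (the function name is illustrative and the bounds check of the original is omitted):

static int example_cmd_submit(struct psp_context *psp,
			      uint64_t cmd_buf_mc_addr,
			      uint64_t fence_mc_addr, int index)
{
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *frame = ring->ring_mem;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t frame_size_dw = sizeof(*frame) / 4;
	uint32_t wptr = psp->funcs->ring_get_wptr(psp);	/* per-ASIC hook */

	/* Advance to the frame the write pointer designates. */
	frame += (wptr % ring_size_dw) / frame_size_dw;
	memset(frame, 0, sizeof(*frame));

	frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	frame->fence_value = index;

	/* Per-ASIC hook hides the SR-IOV vs bare-metal register choice. */
	psp->funcs->ring_set_wptr(psp, (wptr + frame_size_dw) % ring_size_dw);
	return 0;
}

This is why the VMR-ring special case (C2PMSG_102/101 versus C2PMSG_67) can collapse into the two small wptr accessors instead of a duplicated submit routine per ASIC.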
-static int -psp_v11_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch (ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - if (adev->asic_type < CHIP_NAVI10) { - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - } else { - *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10; - *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10; - } - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - if (adev->asic_type < CHIP_NAVI10) { - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - } else { - *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10; - *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10; - } - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v11_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 
4 bytes */ - ucode_size -= 4; - } - - return true; -} - static int psp_v11_0_mode1_reset(struct psp_context *psp) { int ret; @@ -742,176 +583,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) return 0; } -/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready. - * For now, return success and hack the hive_id so high level code can - * start testing - */ -static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, - int number_devices, struct psp_xgmi_topology_info *topology) -{ - struct ta_xgmi_shared_memory *xgmi_cmd; - struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; - struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; - int i; - int ret; - - if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) - return -EINVAL; - - xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; - memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - - /* Fill in the shared memory with topology information as input */ - topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; - topology_info_input->num_nodes = number_devices; - - for (i = 0; i < topology_info_input->num_nodes; i++) { - topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; - topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; - topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; - topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; - } - - /* Invoke xgmi ta to get the topology information */ - ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); - if (ret) - return ret; - - /* Read the output topology information from the shared memory */ - topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; - topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; - for (i = 0; i < topology->num_nodes; i++) { - topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; - topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; - topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; - topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; - } - - return 0; -} - -static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, - int number_devices, struct psp_xgmi_topology_info *topology) -{ - struct ta_xgmi_shared_memory *xgmi_cmd; - struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; - int i; - - if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) - return -EINVAL; - - xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; - memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - - topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; - topology_info_input->num_nodes = number_devices; - - for (i = 0; i < topology_info_input->num_nodes; i++) { - topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; - topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; - topology_info_input->nodes[i].is_sharing_enabled = 1; - topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; - } - - /* Invoke xgmi ta to set topology information */ - return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); -} - -static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t 
*hive_id) -{ - struct ta_xgmi_shared_memory *xgmi_cmd; - int ret; - - xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; - memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; - - /* Invoke xgmi ta to get hive id */ - ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); - if (ret) - return ret; - - *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; - - return 0; -} - -static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) -{ - struct ta_xgmi_shared_memory *xgmi_cmd; - int ret; - - xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; - memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); - - xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; - - /* Invoke xgmi ta to get the node id */ - ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); - if (ret) - return ret; - - *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; - - return 0; -} - -static int psp_v11_0_ras_trigger_error(struct psp_context *psp, - struct ta_ras_trigger_error_input *info) -{ - struct ta_ras_shared_memory *ras_cmd; - int ret; - - if (!psp->ras.ras_initialized) - return -EINVAL; - - ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; - memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); - - ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; - ras_cmd->ras_in_message.trigger_error = *info; - - ret = psp_ras_invoke(psp, ras_cmd->cmd_id); - if (ret) - return -EINVAL; - - return ras_cmd->ras_status; -} - -static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr) -{ -#if 0 - // not support yet. - struct ta_ras_shared_memory *ras_cmd; - int ret; - - if (!psp->ras.ras_initialized) - return -EINVAL; - - ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; - memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); - - ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON; - ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr; - - ret = psp_ras_invoke(psp, ras_cmd->cmd_id); - if (ret) - return -EINVAL; - - return ras_cmd->ras_status; -#else - return -EINVAL; -#endif -} - -static int psp_v11_0_rlc_autoload_start(struct psp_context *psp) -{ - return psp_rlc_autoload_start(psp); -} - static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) { int ret; @@ -943,53 +614,18 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) return ret; } -static void psp_v11_0_memory_training_fini(struct psp_context *psp) -{ - struct psp_memory_training_context *ctx = &psp->mem_train_ctx; - - ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; - kfree(ctx->sys_cache); - ctx->sys_cache = NULL; -} - -static int psp_v11_0_memory_training_init(struct psp_context *psp) -{ - int ret; - struct psp_memory_training_context *ctx = &psp->mem_train_ctx; - - if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { - DRM_DEBUG("memory training is not supported!\n"); - return 0; - } - - ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); - if (ctx->sys_cache == NULL) { - DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); - ret = -ENOMEM; - goto Err_out; - } - - DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", - ctx->train_data_size, - ctx->p2c_train_data_offset, - ctx->c2p_train_data_offset); - ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; - return 0; - -Err_out: - psp_v11_0_memory_training_fini(psp); - return ret; -} - /* - * save and restore proces + * save and restore process */ static int 
psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) { - int ret; - uint32_t p2c_header[4]; struct psp_memory_training_context *ctx = &psp->mem_train_ctx; - uint32_t *pcache = (uint32_t*)ctx->sys_cache; + uint32_t *pcache = (uint32_t *)ctx->sys_cache; + struct amdgpu_device *adev = psp->adev; + uint32_t p2c_header[4]; + uint32_t sz; + void *buf; + int ret, idx; if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { DRM_DEBUG("Memory training is not supported.\n"); @@ -1004,7 +640,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } - amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", pcache[0], pcache[1], pcache[2], pcache[3], p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); @@ -1041,10 +677,44 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) DRM_DEBUG("Memory training ops:%x.\n", ops); if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { - ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); - if (ret) { - DRM_ERROR("Send long training msg failed.\n"); - return ret; + /* + * Long training will encroach a certain amount on the bottom of VRAM; + * save the content from the bottom of VRAM to system memory + * before training, and restore it after training to avoid + * VRAM corruption. + */ + sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { + DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", + adev->gmc.visible_vram_size, + adev->mman.aper_base_kaddr); + return -EINVAL; + } + + buf = vmalloc(sz); + if (!buf) { + DRM_ERROR("failed to allocate system memory.\n"); + return -ENOMEM; + } + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); + ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); + drm_dev_exit(idx); + return ret; + } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->hdp.funcs->flush_hdp(adev, NULL); + vfree(buf); + drm_dev_exit(idx); + } else { + vfree(buf); + return -ENODEV; } } @@ -1068,29 +738,105 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } +static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = psp->km_ring.ring_wptr; + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + psp->km_ring.ring_wptr = value; + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + +static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* + * LFB address which is aligned to 1MB address and has to be + * right-shifted by 20 so that LFB address can be passed on a 32-bit C2P + * register + */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, 
(fw_pri_mc_addr >> 20)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fire up the interrupt so PSP can pick up the address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16)); + + /* FW load takes a very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); + + return ret; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, + .bootloader_load_spl = psp_v11_0_bootloader_load_spl, .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv, .bootloader_load_sos = psp_v11_0_bootloader_load_sos, .ring_init = psp_v11_0_ring_init, .ring_create = psp_v11_0_ring_create, .ring_stop = psp_v11_0_ring_stop, .ring_destroy = psp_v11_0_ring_destroy, - .cmd_submit = psp_v11_0_cmd_submit, - .compare_sram_data = psp_v11_0_compare_sram_data, .mode1_reset = psp_v11_0_mode1_reset, - .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, - .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, - .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, - .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, - .support_vmr_ring = psp_v11_0_support_vmr_ring, - .ras_trigger_error = psp_v11_0_ras_trigger_error, - .ras_cure_posion = psp_v11_0_ras_cure_posion, - .rlc_autoload_start = psp_v11_0_rlc_autoload_start, - .mem_training_init = psp_v11_0_memory_training_init, - .mem_training_fini = psp_v11_0_memory_training_fini, .mem_training = psp_v11_0_memory_training, + .ring_get_wptr = psp_v11_0_ring_get_wptr, + .ring_set_wptr = psp_v11_0_ring_set_wptr, + .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c new file mode 100644 index 000000000000..ff13e1beb49b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c @@ -0,0 +1,208 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_psp.h" +#include "amdgpu_ucode.h" +#include "soc15_common.h" +#include "psp_v11_0_8.h" + +#include "mp/mp_11_0_8_offset.h" + +static int psp_v11_0_8_ring_init(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + ring->ring_type = ring_type; + + /* allocate 4k Page of Local Frame Buffer memory for ring */ + ring->ring_size = 0x1000; + ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + if (ret) { + ring->ring_size = 0; + return ret; + } + + return 0; +} + +static int psp_v11_0_8_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + } else { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + } + + return ret; +} + +static int psp_v11_0_8_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + ret = psp_v11_0_8_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v11_0_8_ring_stop_sriov failed!\n"); + return ret; + } + + /* Write low address of the ring to C2PMSG_102 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); + /* Write high address of the ring to C2PMSG_103 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg); + + /* Write the ring initialization command to C2PMSG_101 */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_INIT_GPCOM_RING); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_101 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, false); + + } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); + return ret; + 
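/*
 * (Aside, not part of this patch: assuming the common psp_wait_for()
 * helper keeps its usual semantics -- poll the register until
 * (value & mask) == expected -- the two wait patterns used in this
 * file read as:
 *
 *	psp_wait_for(psp, reg, 0x80000000, 0x80000000, false);
 *		wait until the response flag (bit 31) is set
 *
 *	psp_wait_for(psp, reg, 0x80000000, 0x8000FFFF, false);
 *		wait until bit 31 is set AND the low 16 status bits
 *		are zero, i.e. done with no error code
 * )
 */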
} + + /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); + } + + return ret; +} + +static int psp_v11_0_8_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v11_0_8_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; +} + +static uint32_t psp_v11_0_8_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v11_0_8_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + +static const struct psp_funcs psp_v11_0_8_funcs = { + .ring_init = psp_v11_0_8_ring_init, + .ring_create = psp_v11_0_8_ring_create, + .ring_stop = psp_v11_0_8_ring_stop, + .ring_destroy = psp_v11_0_8_ring_destroy, + .ring_get_wptr = psp_v11_0_8_ring_get_wptr, + .ring_set_wptr = psp_v11_0_8_ring_set_wptr, +}; + +void psp_v11_0_8_set_psp_funcs(struct psp_context *psp) +{ + psp->funcs = &psp_v11_0_8_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.h b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.h new file mode 100644 index 000000000000..890377a5afe0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.h @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __PSP_V11_0_8_H__ +#define __PSP_V11_0_8_H__ + +#include "amdgpu_psp.h" + +void psp_v11_0_8_set_psp_funcs(struct psp_context *psp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 8f553f6f92d6..a2588200ea58 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -38,6 +38,10 @@ #include "oss/osssys_4_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/renoir_asd.bin"); +MODULE_FIRMWARE("amdgpu/renoir_ta.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin"); + /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -47,39 +51,68 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) const char *chip_name; char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *asd_hdr; - + const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_RENOIR: - chip_name = "renoir"; + if (adev->apu_flags & AMD_APU_IS_RENOIR) + chip_name = "renoir"; + else + chip_name = "green_sardine"; break; default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); - if (err) - goto out1; - - err = amdgpu_ucode_validate(adev->psp.asd_fw); + err = psp_init_asd_microcode(psp, chip_name); if (err) - goto out1; - - asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)asd_hdr + - le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + return err; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v12.0: Failed to load firmware \"%s\"\n", + fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out; + + ta_hdr = (const struct ta_firmware_header_v1_0 *) + adev->psp.ta_fw->data; + adev->psp.hdcp_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.dtm_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + + le32_to_cpu(ta_hdr->dtm.offset_bytes); + } return 0; -out1: - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; +out: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + if (err) { + dev_err(adev->dev, + "psp v12.0: Failed to load firmware 
\"%s\"\n", + fw_name); + } return err; } @@ -95,11 +128,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - printk("sos fw version = 0x%x.\n", psp->sos_fw_version); + if (sol_reg) return 0; - } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -107,10 +137,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -148,10 +176,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -228,13 +254,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp, return 0; } -static bool psp_v12_0_support_vmr_ring(struct psp_context *psp) -{ - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) - return true; - return false; -} - static int psp_v12_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -243,7 +262,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - if (psp_v12_0_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(psp->adev)) { /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -295,7 +314,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; /* Write the ring destroy command*/ - if (psp_v12_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); else @@ -306,7 +325,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) */ - if (psp_v12_0_support_vmr_ring(psp)) + if (amdgpu_sriov_vf(adev)) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 0x80000000, 0x80000000, false); else @@ -334,185 +353,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v12_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v12_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, 
mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v12_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - -static int -psp_v12_0_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch (ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; - *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 
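/*
 * (Aside, not part of this patch: psp_v12_0_cmd_submit above and this
 * sram_map/compare_sram_data pair are near-verbatim copies of the
 * psp_v3_1 versions removed later in this diff.  The frame bookkeeping
 * moves to common code; only the write-pointer access stays per ASIC,
 * via the new ring_get_wptr/ring_set_wptr hooks.  A rough sketch of the
 * shared submit path, under that assumption:
 *
 *	prev = ring_get_wptr(psp);                  // in DWORDs
 *	frame = ring_start + prev / frame_size_dw;  // wrap handled as before
 *	fill_frame(frame, cmd_buf_mc_addr, fence_mc_addr, index);
 *	ring_set_wptr(psp, (prev + frame_size_dw) % ring_size_dw);
 *
 * fill_frame() is a placeholder name, not a function in this series.)
 */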
0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v12_0_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - static int psp_v12_0_mode1_reset(struct psp_context *psp) { int ret; @@ -547,6 +387,30 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) return 0; } +static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v12_0_funcs = { .init_microcode = psp_v12_0_init_microcode, .bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv, @@ -555,9 +419,9 @@ static const struct psp_funcs psp_v12_0_funcs = { .ring_create = psp_v12_0_ring_create, .ring_stop = psp_v12_0_ring_stop, .ring_destroy = psp_v12_0_ring_destroy, - .cmd_submit = psp_v12_0_cmd_submit, - .compare_sram_data = psp_v12_0_compare_sram_data, .mode1_reset = psp_v12_0_mode1_reset, + .ring_get_wptr = psp_v12_0_ring_get_wptr, + .ring_set_wptr = psp_v12_0_ring_set_wptr, }; void psp_v12_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c new file mode 100644 index 000000000000..17655bc6d2f1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -0,0 +1,463 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_psp.h" +#include "amdgpu_ucode.h" +#include "soc15_common.h" +#include "psp_v13_0.h" + +#include "mp/mp_13_0_2_offset.h" +#include "mp/mp_13_0_2_sh_mask.h" + +MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin"); + +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + +/* Read USB-PD from LFB */ +#define GFX_CMD_USB_PD_USE_LFB 0x480 + +static int psp_v13_0_init_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + const char *chip_name; + int err = 0; + + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): + chip_name = "aldebaran"; + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): + chip_name = "yellow_carp"; + break; + default: + BUG(); + } + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(psp, chip_name); + if (err) + return err; + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): + err = psp_init_asd_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_toc_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(psp, chip_name); + if (err) + return err; + break; + default: + BUG(); + } + + return 0; +} + +static bool psp_v13_0_is_sos_alive(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return sol_reg != 0x0; +} + +static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + int ret; + int retry_loop; + + for (retry_loop = 0; retry_loop < 10; retry_loop++) { + /* Wait for bootloader to signify that it is + ready by having bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, + 0x80000000, + false); + + if (ret == 0) + return 0; + } + + return ret; +} + +static int psp_v13_0_bootloader_load_component(struct psp_context *psp, + struct psp_bin_desc *bin_desc, + enum psp_bootloader_cmd bl_cmd) +{ + int ret; + uint32_t psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + + /* Check sOS sign of life register to confirm sys driver and sOS + * have already been loaded.
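+ *
+ * (Aside, not part of this patch: every bootloader entry point below
+ * funnels through this one helper, so supporting a hypothetical new
+ * component would only need its bin_desc and bootloader command, e.g.:
+ *
+ *	static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
+ *	{
+ *		return psp_v13_0_bootloader_load_component(psp,
+ *				&psp->ras_drv, PSP_BL__LOAD_RAS_DRV);
+ *	}
+ *
+ * psp->ras_drv and PSP_BL__LOAD_RAS_DRV are illustrative names, not
+ * definitions from this series.)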
+ */ + if (psp_v13_0_is_sos_alive(psp)) + return 0; + + ret = psp_v13_0_wait_for_bootloader(psp); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy the PSP component binary to memory */ + memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes); + + /* Provide the PSP component to the bootloader */ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = bl_cmd; + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + ret = psp_v13_0_wait_for_bootloader(psp); + + return ret; +} + +static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp) +{ + return psp_v13_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE); +} + +static int psp_v13_0_bootloader_load_sysdrv(struct psp_context *psp) +{ + return psp_v13_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV); +} + +static int psp_v13_0_bootloader_load_soc_drv(struct psp_context *psp) +{ + return psp_v13_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV); +} + +static int psp_v13_0_bootloader_load_intf_drv(struct psp_context *psp) +{ + return psp_v13_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV); +} + +static int psp_v13_0_bootloader_load_dbg_drv(struct psp_context *psp) +{ + return psp_v13_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); +} + +static int psp_v13_0_bootloader_load_sos(struct psp_context *psp) +{ + int ret; + unsigned int psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + + /* Check sOS sign of life register to confirm sys driver and sOS + * have already been loaded. + */ + if (psp_v13_0_is_sos_alive(psp)) + return 0; + + ret = psp_v13_0_wait_for_bootloader(psp); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy Secure OS binary to PSP memory */ + memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes); + + /* Provide the PSP secure OS to bootloader */ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV; + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), + 0, true); + + return ret; +} + +static int psp_v13_0_ring_init(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + ring->ring_type = ring_type; + + /* allocate 4k Page of Local Frame Buffer memory for ring */ + ring->ring_size = 0x1000; + ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + if (ret) { + ring->ring_size = 0; + return ret; + } + + return 0; +} + +static int psp_v13_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000,
false); + } else { + /* Write the ring destroy command*/ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + } + + return ret; +} + +static int psp_v13_0_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + ret = psp_v13_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v13_0_ring_stop_sriov failed!\n"); + return ret; + } + + /* Write low address of the ring to C2PMSG_102 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, psp_ring_reg); + /* Write high address of the ring to C2PMSG_103 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, psp_ring_reg); + + /* Write the ring initialization command to C2PMSG_101 */ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_INIT_GPCOM_RING); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_101 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, false); + + } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); + return ret; + } + + /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); + } + + return ret; +} + +static int psp_v13_0_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v13_0_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; +} + +static uint32_t psp_v13_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v13_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if 
(amdgpu_sriov_vf(adev)) { + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value); +} + +static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* + * LFB address which is aligned to 1MB address and has to be + * right-shifted by 20 so that LFB address can be passed on a 32-bit C2P + * register + */ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fire up the interrupt so PSP can pick up the address */ + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16)); + + /* FW load takes a very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36); + + return ret; +} + +static const struct psp_funcs psp_v13_0_funcs = { + .init_microcode = psp_v13_0_init_microcode, + .bootloader_load_kdb = psp_v13_0_bootloader_load_kdb, + .bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv, + .bootloader_load_soc_drv = psp_v13_0_bootloader_load_soc_drv, + .bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv, + .bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv, + .bootloader_load_sos = psp_v13_0_bootloader_load_sos, + .ring_init = psp_v13_0_ring_init, + .ring_create = psp_v13_0_ring_create, + .ring_stop = psp_v13_0_ring_stop, + .ring_destroy = psp_v13_0_ring_destroy, + .ring_get_wptr = psp_v13_0_ring_get_wptr, + .ring_set_wptr = psp_v13_0_ring_set_wptr, + .load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw +}; + +void psp_v13_0_set_psp_funcs(struct psp_context *psp) +{ + psp->funcs = &psp_v13_0_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h new file mode 100644 index 000000000000..b2414a729ca1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __PSP_V13_0_H__ +#define __PSP_V13_0_H__ + +#include "amdgpu_psp.h" + +void psp_v13_0_set_psp_funcs(struct psp_context *psp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index fdc00938327b..1ed357cb0f49 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 -static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554}; - -static bool psp_v3_1_support_vmr_ring(struct psp_context *psp); static int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type); @@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; const char *chip_name; - char fw_name[30]; int err = 0; - const struct psp_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); - err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); - if (err) - goto out; - - err = amdgpu_ucode_validate(adev->psp.sos_fw); - if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; - adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) - - le32_to_cpu(hdr->sos_size_bytes); - adev->psp.sys_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(hdr->sos_offset_bytes); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); - err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + err = psp_init_sos_microcode(psp, chip_name); if (err) - goto out; + return err; - err = amdgpu_ucode_validate(adev->psp.asd_fw); + err = psp_init_asd_microcode(psp, chip_name); if (err) - goto out; - - hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); - adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - adev->psp.asd_start_addr = (uint8_t *)hdr + - le32_to_cpu(hdr->header.ucode_array_offset_bytes); + return err; return 0; -out: - if (err) { - dev_err(adev->dev, - "psp v3.1: Failed to load firmware \"%s\"\n", - fw_name); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; - } - - return err; } static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) @@ -147,10 +102,8 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) if (ret) return ret; - 
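/*
 * (Aside, not part of this patch: the open-coded memset + memcpy pairs
 * removed below are folded into the shared psp_copy_fw() helper.  A
 * minimal sketch of what that helper is expected to do, inferred from
 * the call sites in this diff -- the drm_dev_enter() guard is an
 * assumption, not visible here:
 *
 *	void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr,
 *			 uint32_t bin_size)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
 *			return;
 *		memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 *		memcpy(psp->fw_pri_buf, start_addr, bin_size);
 *		drm_dev_exit(idx);
 *	}
 * )
 */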
memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -168,41 +121,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) return ret; } -static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) -{ - int i; - - if (ver == adev->psp.sos_fw_version) - return true; - - /* - * Double check if the latest four legacy versions. - * If yes, it is still the right version. - */ - for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) { - if (sos_old_versions[i] == adev->psp.sos_fw_version) - return true; - } - - return false; -} - static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) { int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg, ver; + uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { - psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - printk("sos fw version = 0x%x.\n", psp->sos_fw_version); + if (sol_reg) return 0; - } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -210,10 +141,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -227,11 +156,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); - - ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); - if (!psp_v3_1_match_version(adev, ver)) - DRM_WARN("SOS version doesn't match\n"); - return ret; } @@ -302,7 +226,8 @@ static int psp_v3_1_ring_create(struct psp_context *psp, psp_v3_1_reroute_ih(psp); - if (psp_v3_1_support_vmr_ring(psp)) { + if (amdgpu_sriov_vf(adev)) { + ring->ring_wptr = 0; ret = psp_v3_1_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n"); @@ -360,34 +285,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - unsigned int psp_ring_reg = 0; struct amdgpu_device *adev = psp->adev; - if (psp_v3_1_support_vmr_ring(psp)) { - /* Write the Destroy GPCOM ring command to C2PMSG_101 */ - psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg); - - /* there might be handshake issue which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - } else { - /* Write the ring destroy command to C2PMSG_64 */ - psp_ring_reg = 3 << 16; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + /* Write the ring destroy command*/ + if (amdgpu_sriov_vf(adev)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, 
mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); - /* there might be handshake issue which needs delay */ - mdelay(20); + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); - /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - } + /* Wait for response flag (bit 31) */ + if (amdgpu_sriov_vf(adev)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); return ret; } @@ -410,187 +327,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v3_1_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v3_1_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v3_1_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - /* send interrupt to PSP for SRIOV ring write pointer update */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - -static int -psp_v3_1_sram_map(struct amdgpu_device *adev, - unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, - unsigned int *sram_data_reg_offset, - enum AMDGPU_UCODE_ID ucode_id) -{ - int ret = 0; - - switch(ucode_id) { -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SMC: - *sram_offset = 0; - *sram_addr_reg_offset = 0; 
- *sram_data_reg_offset = 0; - break; -#endif - - case AMDGPU_UCODE_ID_CP_CE: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_PFP: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_ME: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC1: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_CP_MEC2: - *sram_offset = 0x10000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_RLC_G: - *sram_offset = 0x2000; - *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); - break; - - case AMDGPU_UCODE_ID_SDMA0: - *sram_offset = 0x0; - *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); - *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); - break; - -/* TODO: needs to confirm */ -#if 0 - case AMDGPU_UCODE_ID_SDMA1: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_UVD: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; - - case AMDGPU_UCODE_ID_VCE: - *sram_offset = ; - *sram_addr_reg_offset = ; - break; -#endif - - case AMDGPU_UCODE_ID_MAXIMUM: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static bool psp_v3_1_compare_sram_data(struct psp_context *psp, - struct amdgpu_firmware_info *ucode, - enum AMDGPU_UCODE_ID ucode_type) -{ - int err = 0; - unsigned int fw_sram_reg_val = 0; - unsigned int fw_sram_addr_reg_offset = 0; - unsigned int fw_sram_data_reg_offset = 0; - unsigned int ucode_size; - uint32_t *ucode_mem = NULL; - struct amdgpu_device *adev = psp->adev; - - err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, - &fw_sram_data_reg_offset, ucode_type); - if (err) - return false; - - WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); - - ucode_size = ucode->ucode_size; - ucode_mem = (uint32_t *)ucode->kaddr; - while (ucode_size) { - fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); - - if (*ucode_mem != fw_sram_reg_val) - return false; - - ucode_mem++; - /* 4 bytes */ - ucode_size -= 4; - } - - return true; -} - static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -634,12 +370,30 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) return 0; } -static bool psp_v3_1_support_vmr_ring(struct psp_context *psp) +static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp) { - if (amdgpu_sriov_vf(psp->adev)) - return true; + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) + data = psp->km_ring.ring_wptr; + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + return data; +} - return false; +static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (amdgpu_sriov_vf(adev)) { + 
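+		/*
+		 * (Aside, not part of this patch: under SR-IOV psp_v3_1
+		 * keeps a shadow write pointer -- ring_get_wptr above
+		 * returns psp->km_ring.ring_wptr instead of reading a
+		 * register, and the store a few lines down keeps that
+		 * shadow in sync:
+		 *
+		 *	psp->km_ring.ring_wptr = value;
+		 *
+		 * psp_v13_0, by contrast, reads C2PMSG_102 back
+		 * directly; the patch does not say why the register
+		 * readback is avoided on this ASIC.)
+		 */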
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + /* send interrupt to PSP for SRIOV ring write pointer update */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_CONSUME_CMD); + psp->km_ring.ring_wptr = value; + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } static const struct psp_funcs psp_v3_1_funcs = { @@ -650,11 +404,10 @@ static const struct psp_funcs psp_v3_1_funcs = { .ring_create = psp_v3_1_ring_create, .ring_stop = psp_v3_1_ring_stop, .ring_destroy = psp_v3_1_ring_destroy, - .cmd_submit = psp_v3_1_cmd_submit, - .compare_sram_data = psp_v3_1_compare_sram_data, .smu_reload_quirk = psp_v3_1_smu_reload_quirk, .mode1_reset = psp_v3_1_mode1_reset, - .support_vmr_ring = psp_v3_1_support_vmr_ring, + .ring_get_wptr = psp_v3_1_ring_get_wptr, + .ring_set_wptr = psp_v3_1_ring_set_wptr, }; void psp_v3_1_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h deleted file mode 100644 index 055321f61ca7..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef __R600_DPM_H__ -#define __R600_DPM_H__ - -#define R600_ASI_DFLT 10000 -#define R600_BSP_DFLT 0x41EB -#define R600_BSU_DFLT 0x2 -#define R600_AH_DFLT 5 -#define R600_RLP_DFLT 25 -#define R600_RMP_DFLT 65 -#define R600_LHP_DFLT 40 -#define R600_LMP_DFLT 15 -#define R600_TD_DFLT 0 -#define R600_UTC_DFLT_00 0x24 -#define R600_UTC_DFLT_01 0x22 -#define R600_UTC_DFLT_02 0x22 -#define R600_UTC_DFLT_03 0x22 -#define R600_UTC_DFLT_04 0x22 -#define R600_UTC_DFLT_05 0x22 -#define R600_UTC_DFLT_06 0x22 -#define R600_UTC_DFLT_07 0x22 -#define R600_UTC_DFLT_08 0x22 -#define R600_UTC_DFLT_09 0x22 -#define R600_UTC_DFLT_10 0x22 -#define R600_UTC_DFLT_11 0x22 -#define R600_UTC_DFLT_12 0x22 -#define R600_UTC_DFLT_13 0x22 -#define R600_UTC_DFLT_14 0x22 -#define R600_DTC_DFLT_00 0x24 -#define R600_DTC_DFLT_01 0x22 -#define R600_DTC_DFLT_02 0x22 -#define R600_DTC_DFLT_03 0x22 -#define R600_DTC_DFLT_04 0x22 -#define R600_DTC_DFLT_05 0x22 -#define R600_DTC_DFLT_06 0x22 -#define R600_DTC_DFLT_07 0x22 -#define R600_DTC_DFLT_08 0x22 -#define R600_DTC_DFLT_09 0x22 -#define R600_DTC_DFLT_10 0x22 -#define R600_DTC_DFLT_11 0x22 -#define R600_DTC_DFLT_12 0x22 -#define R600_DTC_DFLT_13 0x22 -#define R600_DTC_DFLT_14 0x22 -#define R600_VRC_DFLT 0x0000C003 -#define R600_VOLTAGERESPONSETIME_DFLT 1000 -#define R600_BACKBIASRESPONSETIME_DFLT 1000 -#define R600_VRU_DFLT 0x3 -#define R600_SPLLSTEPTIME_DFLT 0x1000 -#define R600_SPLLSTEPUNIT_DFLT 0x3 -#define R600_TPU_DFLT 0 -#define R600_TPC_DFLT 0x200 -#define R600_SSTU_DFLT 0 -#define R600_SST_DFLT 0x00C8 -#define R600_GICST_DFLT 0x200 -#define R600_FCT_DFLT 0x0400 -#define R600_FCTU_DFLT 0 -#define R600_CTXCGTT3DRPHC_DFLT 0x20 -#define R600_CTXCGTT3DRSDC_DFLT 0x40 -#define R600_VDDC3DOORPHC_DFLT 0x100 -#define R600_VDDC3DOORSDC_DFLT 0x7 -#define R600_VDDC3DOORSU_DFLT 0 -#define R600_MPLLLOCKTIME_DFLT 100 -#define R600_MPLLRESETTIME_DFLT 150 -#define R600_VCOSTEPPCT_DFLT 20 -#define R600_ENDINGVCOSTEPPCT_DFLT 5 -#define R600_REFERENCEDIVIDER_DFLT 4 - -#define R600_PM_NUMBER_OF_TC 15 -#define R600_PM_NUMBER_OF_SCLKS 20 -#define R600_PM_NUMBER_OF_MCLKS 4 -#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4 -#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3 - -/* XXX are these ok? */ -#define R600_TEMP_RANGE_MIN (90 * 1000) -#define R600_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum r600_power_level { - R600_POWER_LEVEL_LOW = 0, - R600_POWER_LEVEL_MEDIUM = 1, - R600_POWER_LEVEL_HIGH = 2, - R600_POWER_LEVEL_CTXSW = 3, -}; - -enum r600_td { - R600_TD_AUTO, - R600_TD_UP, - R600_TD_DOWN, -}; - -enum r600_display_watermark { - R600_DISPLAY_WATERMARK_LOW = 0, - R600_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum r600_display_gap -{ - R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - R600_PM_DISPLAY_GAP_VBLANK = 1, - R600_PM_DISPLAY_GAP_WATERMARK = 2, - R600_PM_DISPLAY_GAP_IGNORE = 3, -}; -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_common.h b/drivers/gpu/drm/amd/amdgpu/sdma_common.h new file mode 100644 index 000000000000..8629ef7e8ad9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_common.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SDMA_COMMON_H__ +#define __SDMA_COMMON_H__ + +enum sdma_utcl2_cache_read_policy { + CACHE_READ_POLICY_L2__LRU = 0x00000000, + CACHE_READ_POLICY_L2__STREAM = 0x00000001, + CACHE_READ_POLICY_L2__NOA = 0x00000002, + CACHE_READ_POLICY_L2__DEFAULT = CACHE_READ_POLICY_L2__NOA, +}; + +enum sdma_utcl2_cache_write_policy { + CACHE_WRITE_POLICY_L2__LRU = 0x00000000, + CACHE_WRITE_POLICY_L2__STREAM = 0x00000001, + CACHE_WRITE_POLICY_L2__NOA = 0x00000002, + CACHE_WRITE_POLICY_L2__BYPASS = 0x00000003, + CACHE_WRITE_POLICY_L2__DEFAULT = CACHE_WRITE_POLICY_L2__BYPASS, +}; + +#endif /* __SDMA_COMMON_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index a10175838013..4509bd4cce2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -243,7 +243,9 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (VI). 
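
The sdma_v2_4 hunks below, and the matching sdma_v3_0 and sdma_v4_0 hunks later in this patch, rewrite the IB alignment math: the nop pad emitted before an INDIRECT packet becomes (2 - wptr) & 7 and the trailing IB pad becomes (-length_dw) & 7. A standalone check that the masked forms match the old modulo forms for every residue:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint32_t w = 0; w < 64; w++) {
		/* old: pad so the six-dword INDIRECT packet ends on an 8 DW boundary */
		uint32_t old_nop = (10 - (w & 7)) % 8;
		uint32_t new_nop = (2 - w) & 7; /* unsigned wraparound is well defined */
		assert(old_nop == new_nop);

		/* old: pad the IB length up to a multiple of 8 dwords */
		uint32_t old_pad = (8 - (w & 0x7)) % 8;
		uint32_t new_pad = (-w) & 7;
		assert(old_pad == new_pad);
	}
	printf("pad computations are equivalent\n");
	return 0;
}
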
*/ @@ -255,7 +257,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on an 8 DW boundary */ - sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -269,7 +271,7 @@ } /** - * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring * * @ring: amdgpu ring pointer * * @@ -299,7 +301,9 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring) * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -355,8 +359,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -592,6 +594,7 @@ error_free_wb: * sdma_v2_4_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. @@ -614,7 +617,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -741,6 +745,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, /** * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding * */ @@ -750,7 +755,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -790,7 +795,8 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (VI). @@ -870,11 +876,10 @@ static int sdma_v2_4_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = false; sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + (i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1188,10 +1193,11 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) /** * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: unused * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if @@ -1200,7 +1206,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); @@ -1215,7 +1222,7 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -1260,16 +1267,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v2_4_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 5f4e2c616241..135727b59c41 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -417,7 +417,9 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (VI). 
*/ @@ -429,7 +431,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on an 8 DW boundary */ - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -473,7 +475,9 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -529,8 +533,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); } - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -864,6 +866,7 @@ error_free_wb: * sdma_v3_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. @@ -886,7 +889,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -1012,6 +1016,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, /** * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding * */ @@ -1021,7 +1026,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1061,7 +1066,8 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (VI). @@ -1154,11 +1160,10 @@ static int sdma_v3_0_sw_init(void *handle) } sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1626,10 +1631,11 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) /** * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: unused * * Copy GPU buffers using the DMA engine (VI).
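
Alongside the doc fixes, the ring self-tests here now pass an explicit pool to amdgpu_ib_get(); AMDGPU_IB_POOL_DIRECT is intended for submissions like these that go straight to the ring. A condensed sketch of the pattern the test-IB functions follow, assuming kernel context and with the usual error unwinding trimmed:

	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	long r;

	memset(&ib, 0, sizeof(ib));
	/* 256 bytes from the direct pool */
	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		return r;

	/* ... fill ib.ptr[] / ib.length_dw with a WRITE packet ... */

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err;

	r = dma_fence_wait_timeout(f, false, timeout);
	/* 0 means timeout, < 0 an error, > 0 signaled */
err:
	amdgpu_ib_free(adev, &ib, f);
	dma_fence_put(f);
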
* Used by the amdgpu ttm implementation to move pages if @@ -1638,7 +1644,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); @@ -1653,7 +1660,7 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -1698,16 +1705,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v3_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4ef4d31f5231..e8e4749e9c79 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -46,7 +46,6 @@ #include "sdma6/sdma6_4_2_2_sh_mask.h" #include "sdma7/sdma7_4_2_2_offset.h" #include "sdma7/sdma7_4_2_2_sh_mask.h" -#include "hdp/hdp_4_0_offset.h" #include "sdma0/sdma0_4_1_default.h" #include "soc15_common.h" @@ -57,6 +56,7 @@ #include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h" #include "amdgpu_ras.h" +#include "sdma_v4_4.h" MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); @@ -69,6 +69,8 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin"); MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin"); MODULE_FIRMWARE("amdgpu/renoir_sdma.bin"); +MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L @@ -82,6 +84,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev); +static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev); static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), @@ -114,17 +117,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), 
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { @@ -137,7 +144,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; @@ -173,6 +180,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { @@ -202,6 +210,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -221,27 +230,53 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 
0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), - SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002) + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001) +}; + +static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = { + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001), }; static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { @@ -253,8 +288,107 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe) +}; + +static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = { + { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED), + 0, 0, + }, + { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED), + 0, 0, + }, + { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED), + 0, 0, + }, + { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + 
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED), + 0, 0, + }, + { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED), + 0, 0, + }, + { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED), + 0, 0, + }, }; static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, @@ -335,8 +469,8 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id) static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): soc15_program_register_sequence(adev, golden_settings_sdma_4, ARRAY_SIZE(golden_settings_sdma_4)); @@ -344,7 +478,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg10, ARRAY_SIZE(golden_settings_sdma_vg10)); break; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): soc15_program_register_sequence(adev, golden_settings_sdma_4, ARRAY_SIZE(golden_settings_sdma_4)); @@ -352,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg12, ARRAY_SIZE(golden_settings_sdma_vg12)); break; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): soc15_program_register_sequence(adev, golden_settings_sdma0_4_2_init, ARRAY_SIZE(golden_settings_sdma0_4_2_init)); @@ -363,16 +497,22 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma1_4_2, ARRAY_SIZE(golden_settings_sdma1_4_2)); break; - case CHIP_ARCTURUS: + case IP_VERSION(4, 2, 2): soc15_program_register_sequence(adev, golden_settings_sdma_arct, ARRAY_SIZE(golden_settings_sdma_arct)); break; - case CHIP_RAVEN: + case IP_VERSION(4, 4, 0): + soc15_program_register_sequence(adev, + golden_settings_sdma_aldebaran, + ARRAY_SIZE(golden_settings_sdma_aldebaran)); + break; + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): soc15_program_register_sequence(adev, golden_settings_sdma_4_1, ARRAY_SIZE(golden_settings_sdma_4_1)); - if (adev->rev_id >= 8) + if (adev->apu_flags & AMD_APU_IS_RAVEN2) soc15_program_register_sequence(adev, golden_settings_sdma_rv2, ARRAY_SIZE(golden_settings_sdma_rv2)); @@ -381,7 +521,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_rv1, ARRAY_SIZE(golden_settings_sdma_rv1)); break; - case CHIP_RENOIR: + case IP_VERSION(4, 1, 2): soc15_program_register_sequence(adev, golden_settings_sdma_4_3, ARRAY_SIZE(golden_settings_sdma_4_3)); @@ -391,6 +531,36 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev) +{ + int i; + + /* + * The only chips with SDMAv4 and ULV are VG10 and VG20. + * Server SKUs take a different hysteresis setting from other SKUs. 
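
Note that sdma_v4_0_init_golden_registers() above, like the rest of this patch, now dispatches on adev->ip_versions[SDMA0_HWIP][0] instead of the ASIC enum: each IP block carries its own discovered version, so one ASIC name no longer has to imply one SDMA revision. The macro packs major/minor/revision into a single comparable integer; a sketch of the scheme, with the shift layout stated as an assumption:

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	uint32_t sdma_ver = IP_VERSION(4, 2, 2); /* what Arcturus SDMA reports */

	printf("0x%08x\n", sdma_ver); /* prints 0x00040202 */

	/* packing preserves ordering: 4.2.2 sorts after 4.2.0, before 4.4.0 */
	if (sdma_ver >= IP_VERSION(4, 2, 0) && sdma_ver < IP_VERSION(4, 4, 0))
		printf("vega20/arcturus generation\n");
	return 0;
}
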
+ */ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): + if (adev->pdev->device == 0x6860) + break; + return; + case IP_VERSION(4, 2, 0): + if (adev->pdev->device == 0x66a1) + break; + return; + default: + return; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + uint32_t temp; + + temp = RREG32_SDMA(i, mmSDMA0_ULV_CNTL); + temp = REG_SET_FIELD(temp, SDMA0_ULV_CNTL, HYSTERESIS, 0x0); + WREG32_SDMA(i, mmSDMA0_ULV_CNTL, temp); + } +} + static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) { int err = 0; @@ -415,16 +585,17 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev) int i; for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->sdma.instance[i].fw != NULL) - release_firmware(adev->sdma.instance[i].fw); + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; /* arcturus shares the same FW memory across all SDMA instances */ - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || + adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) break; } - memset((void*)adev->sdma.instance, 0, + memset((void *)adev->sdma.instance, 0, sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES); } @@ -450,29 +621,36 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): chip_name = "vega10"; break; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): chip_name = "vega12"; break; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): chip_name = "vega20"; break; - case CHIP_RAVEN: - if (adev->rev_id >= 8) + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_ARCTURUS: + case IP_VERSION(4, 2, 2): chip_name = "arcturus"; break; - case CHIP_RENOIR: - chip_name = "renoir"; + case IP_VERSION(4, 1, 2): + if (adev->apu_flags & AMD_APU_IS_RENOIR) + chip_name = "renoir"; + else + chip_name = "green_sardine"; + break; + case IP_VERSION(4, 4, 0): + chip_name = "aldebaran"; break; default: BUG(); @@ -489,11 +667,12 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) goto out; for (i = 1; i < adev->sdma.num_instances; i++) { - if (adev->asic_type == CHIP_ARCTURUS) { - /* Acturus will leverage the same FW memory + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || + adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) { + /* Arcturus & Aldebaran will leverage the same FW memory for every SDMA instance */ - memcpy((void*)&adev->sdma.instance[i], - (void*)&adev->sdma.instance[0], + memcpy((void *)&adev->sdma.instance[i], + (void *)&adev->sdma.instance[0], sizeof(struct amdgpu_sdma_instance)); } else { @@ -643,7 +822,7 @@ static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring) } /** - * sdma_v4_0_ring_set_wptr - commit the write pointer + * sdma_v4_0_page_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * * @@ -686,7 +865,9 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (VEGA10).
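
init_microcode() above now derives the firmware basename from the SDMA IP version plus apu_flags, and Arcturus and Aldebaran reuse instance 0's firmware context for every engine instead of loading one image per instance. A trimmed sketch of that request path, with the buffer size and error handling purely illustrative:

	char fw_name[40];
	const char *chip_name = "aldebaran"; /* chosen by the IP-version switch */
	int i, r;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
	r = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
	if (r)
		return r;

	for (i = 1; i < adev->sdma.num_instances; i++) {
		/* all instances share one firmware image on these parts */
		memcpy(&adev->sdma.instance[i], &adev->sdma.instance[0],
		       sizeof(struct amdgpu_sdma_instance));
	}
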
*/ @@ -698,7 +879,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* IB packet must end on an 8 DW boundary */ - sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -761,7 +942,9 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -823,8 +1006,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - - sdma[i]->sched.ready = false; } } @@ -858,7 +1039,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) sdma[i] = &adev->sdma.instance[i].page; if ((adev->mman.buffer_funcs_ring == sdma[i]) && - (unset == false)) { + (!unset)) { amdgpu_ttm_set_buffer_funcs_status(adev, false); unset = true; } @@ -871,13 +1052,11 @@ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); - - sdma[i]->sched.ready = false; } } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. @@ -923,6 +1102,17 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum); } WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl); + + /* + * Enable SDMA utilization. It's only supported on + * Arcturus for the moment and firmware version 14 + * and above.
+ */ + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && + adev->sdma.instance[i].fw_version >= 14) + WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable); + /* Extend page fault timeout to avoid interrupt storm */ + WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080); } } @@ -940,7 +1130,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v4_0_gfx_stop(adev); sdma_v4_0_rlc_stop(adev); if (adev->sdma.has_page_queue) @@ -954,7 +1144,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) } } -/** +/* * sdma_v4_0_rb_cntl - get parameters for rb_cntl */ static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) @@ -1205,9 +1395,10 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev) if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA)) return; - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): sdma_v4_1_init_power_gating(adev); sdma_v4_1_update_power_gating(adev, true); break; @@ -1417,6 +1608,7 @@ error_free_wb: * sdma_v4_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (VEGA10). * Returns 0 on success, error on failure. @@ -1439,7 +1631,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -1512,10 +1705,9 @@ static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (VEGA10). */ @@ -1570,8 +1762,8 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib, /** * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding - * */ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { @@ -1579,7 +1771,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1615,7 +1807,8 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (VEGA10). 
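
Hunks like the ULV hysteresis and page-fault-timeout programming above all follow the generated-header read-modify-write idiom: RREG32_SDMA() reads the register, REG_SET_FIELD() splices a field value through its _MASK/_SHIFT pair, and WREG32_SDMA() writes the result back. A self-contained sketch with hand-rolled mask and shift values standing in for the asic_reg header constants:

#include <stdint.h>

/* stand-ins for SDMA0_ULV_CNTL__HYSTERESIS_MASK / __HYSTERESIS__SHIFT */
#define ULV_CNTL_HYSTERESIS_MASK  0x0000001FU
#define ULV_CNTL_HYSTERESIS_SHIFT 0

static inline uint32_t set_field(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

void program_ulv_hysteresis(volatile uint32_t *ulv_cntl)
{
	uint32_t tmp = *ulv_cntl;                        /* RREG32_SDMA   */
	tmp = set_field(tmp, ULV_CNTL_HYSTERESIS_MASK,
			ULV_CNTL_HYSTERESIS_SHIFT, 0x0); /* REG_SET_FIELD */
	*ulv_cntl = tmp;                                 /* WREG32_SDMA   */
}
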
@@ -1645,13 +1838,13 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) { uint fw_version = adev->sdma.instance[0].fw_version; - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): return fw_version >= 430; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): /*return fw_version >= 31;*/ return false; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): return fw_version >= 123; default: return false; @@ -1663,13 +1856,6 @@ static int sdma_v4_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) - adev->sdma.num_instances = 1; - else if (adev->asic_type == CHIP_ARCTURUS) - adev->sdma.num_instances = 8; - else - adev->sdma.num_instances = 2; - r = sdma_v4_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load sdma firmware!\n"); @@ -1677,7 +1863,8 @@ static int sdma_v4_0_early_init(void *handle) } /* TODO: Page queue breaks driver reload under SRIOV */ - if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) + if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) && + amdgpu_sriov_vf((adev))) adev->sdma.has_page_queue = false; else if (sdma_v4_0_fw_support_paging_queue(adev)) adev->sdma.has_page_queue = true; @@ -1686,6 +1873,7 @@ static int sdma_v4_0_early_init(void *handle) sdma_v4_0_set_buffer_funcs(adev); sdma_v4_0_set_vm_pte_funcs(adev); sdma_v4_0_set_irq_funcs(adev); + sdma_v4_0_set_ras_funcs(adev); return 0; } @@ -1701,7 +1889,18 @@ static int sdma_v4_0_late_init(void *handle) .cb = sdma_v4_0_process_ras_data_cb, }; - return amdgpu_sdma_ras_late_init(adev, &ih_info); + sdma_v4_0_setup_ulv(adev); + + if (!amdgpu_persistent_edc_harvesting_supported(adev)) { + if (adev->sdma.funcs && + adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); + } + + if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) + return adev->sdma.funcs->ras_late_init(adev, &ih_info); + else + return 0; } static int sdma_v4_0_sw_init(void *handle) @@ -1728,12 +1927,39 @@ static int sdma_v4_0_sw_init(void *handle) return r; } + /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/ + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_VM_HOLE, + &adev->sdma.vm_hole_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID, + &adev->sdma.doorbell_invalid_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT, + &adev->sdma.pool_timeout_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_SRBMWRITE, + &adev->sdma.srbm_write_irq); + if (r) + return r; + } + for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; ring->use_doorbell = true; - DRM_INFO("use_doorbell being set to: [%s]\n", + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ @@ -1741,7 +1967,8 @@ static int sdma_v4_0_sw_init(void *handle) sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -1759,7 +1986,8 
@@ static int sdma_v4_0_sw_init(void *handle) sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1773,7 +2001,8 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - amdgpu_sdma_ras_fini(adev); + if (adev->sdma.funcs && adev->sdma.funcs->ras_fini) + adev->sdma.funcs->ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -1791,9 +2020,7 @@ static int sdma_v4_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) || - (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset)) + if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); if (!amdgpu_sriov_vf(adev)) @@ -1820,9 +2047,7 @@ static int sdma_v4_0_hw_fini(void *handle) sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); - if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs - && adev->powerplay.pp_funcs->set_powergating_by_smu) || - adev->asic_type == CHIP_RENOIR) + if (adev->flags & AMD_IS_APU) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true); return 0; @@ -1911,14 +2136,14 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, amdgpu_fence_process(&adev->sdma.instance[instance].ring); break; case 1: - if (adev->asic_type == CHIP_VEGA20) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0)) amdgpu_fence_process(&adev->sdma.instance[instance].page); break; case 2: /* XXX compute */ break; case 3: - if (adev->asic_type != CHIP_VEGA20) + if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0)) amdgpu_fence_process(&adev->sdma.instance[instance].page); break; } @@ -1983,6 +2208,72 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev, return 0; } +static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + int instance; + struct amdgpu_task_info task_info; + u64 addr; + + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); + if (instance < 0 || instance >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance invalid %d\n", instance); + return -EINVAL; + } + + addr = (u64)entry->src_data[0] << 12; + addr |= ((u64)entry->src_data[1] & 0xf) << 44; + + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); + amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); + + dev_dbg_ratelimited(adev->dev, + "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u " + "pasid:%u, for process %s pid %d thread %s pid %d\n", + instance, addr, entry->src_id, entry->ring_id, entry->vmid, + entry->pasid, task_info.process_name, task_info.tgid, + task_info.task_name, task_info.pid); + return 0; +} + +static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n"); + sdma_v4_0_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); + 
sdma_v4_0_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, + "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n"); + sdma_v4_0_print_iv_entry(adev, entry); + return 0; +} + +static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + dev_dbg_ratelimited(adev->dev, + "SDMA received an SRBM_WRITE command in a non-privileged command buffer\n"); + sdma_v4_0_print_iv_entry(adev, entry); + return 0; +} + static void sdma_v4_0_update_medium_grain_clock_gating( struct amdgpu_device *adev, bool enable) @@ -2056,21 +2347,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - sdma_v4_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); - sdma_v4_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); - break; - default: - break; - } + sdma_v4_0_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + sdma_v4_0_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); return 0; } @@ -2079,10 +2359,12 @@ static int sdma_v4_0_set_powergating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): sdma_v4_1_update_power_gating(adev, - state == AMD_PG_STATE_GATE ?
true : false); + state == AMD_PG_STATE_GATE); break; default: break; @@ -2265,7 +2547,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->asic_type == CHIP_ARCTURUS && i >= 5) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs_2nd_mmhub; else @@ -2273,7 +2555,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = i; if (adev->sdma.has_page_queue) { - if (adev->asic_type == CHIP_ARCTURUS && i >= 5) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs_2nd_mmhub; else @@ -2298,37 +2580,55 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = { .process = amdgpu_sdma_process_ecc_irq, }; +static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = { + .process = sdma_v4_0_process_vm_hole_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = { + .process = sdma_v4_0_process_doorbell_invalid_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = { + .process = sdma_v4_0_process_pool_timeout_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = { + .process = sdma_v4_0_process_srbm_write_irq, +}; static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) { + adev->sdma.trap_irq.num_types = adev->sdma.num_instances; + adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; + /* For Arcturus and Aldebaran, add four more irq handlers */ switch (adev->sdma.num_instances) { - case 1: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; - break; + case 5: case 8: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; + adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; + adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; + adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; break; - case 2: default: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; break; } adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs; adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs; + adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs; + adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs; + adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs; + adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs; } /** * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: if a secure copy should be used * * Copy GPU buffers using the DMA engine (VEGA10/12).
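
The copy-buffer hunk that follows threads the new tmz flag into the COPY_LINEAR packet header so buffers in the trusted memory zone can be moved without exposing their contents. A sketch of the seven dwords the VEGA-generation path emits; the macro encodings here are invented for illustration, only the field layout follows the hunk:

#include <stdint.h>

#define SDMA_OP_COPY           1
#define SDMA_SUBOP_COPY_LINEAR 0
#define HDR_OP(x)              ((x) & 0xff)
#define HDR_SUB_OP(x)          (((x) & 0xff) << 8)
#define HDR_TMZ(x)             (((x) & 0x1) << 18) /* assumed bit position */

static void emit_copy(uint32_t *ptr, uint32_t *len, uint64_t src,
		      uint64_t dst, uint32_t bytes, int tmz)
{
	ptr[(*len)++] = HDR_OP(SDMA_OP_COPY) |
			HDR_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
			HDR_TMZ(tmz ? 1 : 0);
	ptr[(*len)++] = bytes - 1;             /* count is stored minus one  */
	ptr[(*len)++] = 0;                     /* src/dst endian swap        */
	ptr[(*len)++] = (uint32_t)src;         /* lower_32_bits(src_offset)  */
	ptr[(*len)++] = (uint32_t)(src >> 32); /* upper_32_bits(src_offset)  */
	ptr[(*len)++] = (uint32_t)dst;
	ptr[(*len)++] = (uint32_t)(dst >> 32);
}
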
* Used by the amdgpu ttm implementation to move pages if @@ -2337,10 +2637,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); @@ -2352,7 +2654,7 @@ static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -2409,10 +2711,88 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) sched = &adev->sdma.instance[i].page.sched; else sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = sched; + } + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; +} + +static void sdma_v4_0_get_ras_error_count(uint32_t value, + uint32_t instance, + uint32_t *sec_count) +{ + uint32_t i; + uint32_t sec_cnt; + + /* double bit (multi-bit) error detection is not supported */ + for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) { + /* the SDMA_EDC_COUNTER register in each sdma instance + * shares the same SED shift and mask layout + */ + sec_cnt = (value & + sdma_v4_0_ras_fields[i].sec_count_mask) >> + sdma_v4_0_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("Detected %s in SDMA%d, SED %d\n", + sdma_v4_0_ras_fields[i].name, + instance, sec_cnt); + *sec_count += sec_cnt; + } + } +} + +static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, + uint32_t instance, void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0; + uint32_t reg_value = 0; + + reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER); + /* double bit error is not supported */ + if (reg_value) + sdma_v4_0_get_ras_error_count(reg_value, + instance, &sec_count); + /* err_data->ce_count should be initialized to 0 + * before calling into this function */ + err_data->ce_count += sec_count; + /* double bit error is not supported + * set ue count to 0 */ + err_data->ue_count = 0; + + return 0; +}; + +static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + /* read back edc counter registers to clear the counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) + RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); + } +} + +static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { + .ras_late_init = amdgpu_sdma_ras_late_init, + .ras_fini = amdgpu_sdma_ras_fini, + .query_ras_error_count = sdma_v4_0_query_ras_error_count, + .reset_ras_error_count = sdma_v4_0_reset_ras_error_count, +}; + +static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 2): + adev->sdma.funcs = &sdma_v4_0_ras_funcs; + break; + case IP_VERSION(4, 4,
0): + adev->sdma.funcs = &sdma_v4_4_ras_funcs; + break; + default: + break; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c new file mode 100644 index 000000000000..bf95007f0843 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c @@ -0,0 +1,253 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "sdma/sdma_4_4_0_offset.h" +#include "sdma/sdma_4_4_0_sh_mask.h" +#include "soc15.h" +#include "amdgpu_ras.h" + +#define SDMA1_REG_OFFSET 0x600 +#define SDMA2_REG_OFFSET 0x1cda0 +#define SDMA3_REG_OFFSET 0x1d1a0 +#define SDMA4_REG_OFFSET 0x1d5a0 + +/* helper function that allows using only the sdma0 register offset + * to calculate the register offset for all the sdma instances */ +static uint32_t sdma_v4_4_get_reg_offset(struct amdgpu_device *adev, + uint32_t instance, + uint32_t offset) +{ + uint32_t sdma_base = adev->reg_offset[SDMA0_HWIP][0][0]; + + switch (instance) { + case 0: + return (sdma_base + offset); + case 1: + return (sdma_base + SDMA1_REG_OFFSET + offset); + case 2: + return (sdma_base + SDMA2_REG_OFFSET + offset); + case 3: + return (sdma_base + SDMA3_REG_OFFSET + offset); + case 4: + return (sdma_base + SDMA4_REG_OFFSET + offset); + default: + break; + } + return 0; +} + +static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = { + { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED), + 0, 0, + }, + {
"SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED), + 0, 0, + }, + { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED), + 0, 0, + }, + { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UCODE_BUF_SED), + 0, 0, + }, + { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_RB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_IB_CMD_BUF_SED), + 0, 0, + }, + { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RD_FIFO_SED), + 0, 0, + }, + { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED), + 0, 0, + }, + { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED), + 0, 0, + }, + { "SDMA_SPLIT_DATA_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_SPLIT_DATA_BUF_SED), + 0, 0, + }, + { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED), + 0, 0, + }, + { "SDMA_MC_RDRET_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2), + SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED), + 0, 0, + }, +}; + +static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev, + uint32_t reg_offset, + uint32_t value, + uint32_t instance, + uint32_t *sec_count) +{ + uint32_t i; + uint32_t sec_cnt; + + /* double bits error (multiple bits) error detection is not supported */ + for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) { + if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset) + continue; + + /* the SDMA_EDC_COUNTER register in each sdma instance + * shares the same sed shift_mask + * */ + sec_cnt = (value & + sdma_v4_4_ras_fields[i].sec_count_mask) >> 
+ sdma_v4_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + dev_info(adev->dev, "Detected %s in SDMA%d, SED %d\n", + sdma_v4_4_ras_fields[i].name, + instance, sec_cnt); + *sec_count += sec_cnt; + } + } +} + +static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, + uint32_t instance, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0; + uint32_t reg_value = 0; + uint32_t reg_offset = 0; + + reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER); + reg_value = RREG32(reg_offset); + /* double bit error is not supported */ + if (reg_value) + sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value, + instance, &sec_count); + + reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2); + reg_value = RREG32(reg_offset); + /* double bit error is not supported */ + if (reg_value) + sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value, + instance, &sec_count); + + /* + * err_data->ue_count should be initialized to 0 + * before calling into this function + * + * SDMA RAS supports single bit uncorrectable error detection. + * So, increment uncorrectable error count. + */ + err_data->ue_count += sec_count; + + /* + * SDMA RAS does not support correctable errors. + * Set ce count to 0. + */ + err_data->ce_count = 0; + + return 0; +}; + +static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + uint32_t reg_offset; + + /* write 0 to EDC_COUNTER reg to clear sdma edc counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER); + WREG32(reg_offset, 0); + reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER2); + WREG32(reg_offset, 0); + } + } +} + +const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = { + .ras_late_init = amdgpu_sdma_ras_late_init, + .ras_fini = amdgpu_sdma_ras_fini, + .query_ras_error_count = sdma_v4_4_query_ras_error_count, + .reset_ras_error_count = sdma_v4_4_reset_ras_error_count, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h new file mode 100644 index 000000000000..74a6e5b5e949 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h @@ -0,0 +1,28 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
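A note on the RAS query paths above: the v4.0 and v4.4 variants follow the same pattern — read one EDC counter register, then walk a table of (mask, shift) pairs and sum the per-buffer single-error-correction (SEC) counts. A minimal standalone sketch of that extraction; the struct and function names here are illustrative, the real table is sdma_v4_4_ras_fields[] with masks taken from the generated sh_mask headers:

#include <stdint.h>

struct edc_field {
        const char *name;
        uint32_t sec_count_mask;  /* bits of this field in the register */
        uint32_t sec_count_shift; /* position of the field's LSB */
};

/* Sum the SEC counts of every field that lives in one register value. */
static uint32_t edc_sum_sec_counts(uint32_t reg_value,
                                   const struct edc_field *fields,
                                   unsigned int num_fields)
{
        uint32_t total = 0;
        unsigned int i;

        for (i = 0; i < num_fields; i++)
                total += (reg_value & fields[i].sec_count_mask) >>
                         fields[i].sec_count_shift;
        return total;
}

Because every SDMA instance shares the same field layout, one table serves all instances; only the register read is per-instance.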
+ * + */ +#ifndef __SDMA_V4_4_H__ +#define __SDMA_V4_4_H__ + +extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index f4ad2990f973..853d1511b889 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -32,7 +32,6 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h" #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h" @@ -40,6 +39,7 @@ #include "soc15.h" #include "navi10_sdma_pkt_open.h" #include "nbio_v2_3.h" +#include "sdma_common.h" #include "sdma_v5_0.h" MODULE_FIRMWARE("amdgpu/navi10_sdma.bin"); @@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin"); MODULE_FIRMWARE("amdgpu/navi12_sdma.bin"); MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma1.bin"); + +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin"); +MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin"); + #define SDMA1_REG_OFFSET 0x600 #define SDMA0_HYP_DEC_REG_START 0x5880 #define SDMA0_HYP_DEC_REG_END 0x5893 @@ -88,6 +94,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00) }; +static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), +}; + static const struct soc15_reg_golden golden_settings_sdma_nv10[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), @@ -100,7 +129,42 @@ static const struct soc15_reg_golden 
golden_settings_sdma_nv14[] = { static const struct soc15_reg_golden golden_settings_sdma_nv12[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), +}; + +static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00) }; static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) @@ -123,8 +187,8 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): 
soc15_program_register_sequence(adev, golden_settings_sdma_5, (const u32)ARRAY_SIZE(golden_settings_sdma_5)); @@ -132,7 +196,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv10, (const u32)ARRAY_SIZE(golden_settings_sdma_nv10)); break; - case CHIP_NAVI14: + case IP_VERSION(5, 0, 2): soc15_program_register_sequence(adev, golden_settings_sdma_5, (const u32)ARRAY_SIZE(golden_settings_sdma_5)); @@ -140,14 +204,24 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv14, (const u32)ARRAY_SIZE(golden_settings_sdma_nv14)); break; - case CHIP_NAVI12: - soc15_program_register_sequence(adev, - golden_settings_sdma_5, - (const u32)ARRAY_SIZE(golden_settings_sdma_5)); + case IP_VERSION(5, 0, 5): + if (amdgpu_sriov_vf(adev)) + soc15_program_register_sequence(adev, + golden_settings_sdma_5_sriov, + (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov)); + else + soc15_program_register_sequence(adev, + golden_settings_sdma_5, + (const u32)ARRAY_SIZE(golden_settings_sdma_5)); soc15_program_register_sequence(adev, golden_settings_sdma_nv12, (const u32)ARRAY_SIZE(golden_settings_sdma_nv12)); break; + case IP_VERSION(5, 0, 1): + soc15_program_register_sequence(adev, + golden_settings_sdma_cyan_skillfish, + (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish)); + break; default: break; } @@ -168,24 +242,33 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; - char fw_name[30]; + char fw_name[40]; int err = 0, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; const struct sdma_firmware_header_v1_0 *hdr; + if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5))) + return 0; + DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(5, 0, 2): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(5, 0, 5): chip_name = "navi12"; break; + case IP_VERSION(5, 0, 1): + if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) + chip_name = "cyan_skillfish2"; + else + chip_name = "cyan_skillfish"; + break; default: BUG(); } @@ -286,30 +369,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring) static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u64 *wptr = NULL; - uint64_t local_wptr = 0; + u64 wptr; if (ring->use_doorbell) { /* XXX check if swapping is necessary on BE */ - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); - *wptr = (*wptr) >> 2; - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - u32 lowbit, highbit; - - wptr = &local_wptr; - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; - - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", - ring->me, highbit, lowbit); - *wptr = highbit; - *wptr = (*wptr) << 32; - *wptr |= lowbit; + wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = wptr << 32; + wptr |= 
RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); } - return *wptr; + return wptr >> 2; } /** @@ -346,9 +419,9 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), ring->me, upper_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); } } @@ -370,7 +443,9 @@ static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine * * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from * @ib: IB object to schedule + * @flags: unused * * Schedule an IB in the DMA ring (NAVI10). */ @@ -382,8 +457,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); - /* IB packet must end on a 8 DW boundary */ - sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); + /* An IB packet must end on a 8 DW boundary--the next dword + * must be on a 8-dword boundary. Our IB packet below is 6 + * dwords long, thus add x number of NOPs, such that, in + * modular arithmetic, + * wptr + 6 + x = 8k, k >= 0, which in C is, + * (wptr + 6 + x) % 8 = 0. + * The expression below, is a solution of x. + */ + sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); @@ -396,6 +478,32 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, } /** + * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * + * flush the IB by graphics cache rinse. 
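The reworked padding comment in sdma_v5_0_ring_emit_ib() above deserves a concrete check. The INDIRECT packet is 6 dwords, so x NOPs are needed with (wptr + x + 6) ≡ 0 (mod 8), giving x = (2 - wptr) mod 8; the expression (2 - lower_32_bits(ring->wptr)) & 7 computes exactly that, since & 7 reduces modulo 8 even after unsigned wraparound. A tiny self-check, illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t wptr;

        for (wptr = 0; wptr < 16; wptr++) {
                /* NOPs inserted before the 6-dword INDIRECT packet */
                uint32_t pad = (2 - wptr) & 7;

                assert((wptr + pad + 6) % 8 == 0);
        }
        return 0;
}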
+ */ +static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring) +{ + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | + SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | + SDMA_GCR_GLI_INV(1); + + /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | + SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | + SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | + SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); +} + +/** * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring * * @ring: amdgpu ring pointer @@ -428,7 +536,9 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -437,7 +547,6 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { - struct amdgpu_device *adev = ring->adev; bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; /* write the fence */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) | @@ -460,8 +569,7 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, upper_32_bits(seq)); } - /* Interrupt not work fine on GFX10.1 model yet. Use fallback instead */ - if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) { + if (flags & AMDGPU_FENCE_FLAG_INT) { /* generate an interrupt */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); @@ -488,16 +596,13 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); } - - sdma0->sched.ready = false; - sdma1->sched.ready = false; } /** @@ -513,7 +618,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. 
@@ -522,7 +627,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl, phase_quantum = 0; + u32 f32_cntl = 0, phase_quantum = 0; int i; if (amdgpu_sdma_phase_quantum) { @@ -550,18 +655,22 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) } for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + } + if (enable && amdgpu_sdma_phase_quantum) { - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), phase_quantum); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), phase_quantum); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), phase_quantum); } - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); } } @@ -579,11 +688,14 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v5_0_gfx_stop(adev); sdma_v5_0_rlc_stop(adev); } + if (amdgpu_sriov_vf(adev)) + return; + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 
0 : 1); @@ -616,62 +728,68 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) ring = &adev->sdma.instance[i].ring; wb_offset = (ring->rptr_offs * 4); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); /* Initialize the ring buffer's read and write pointers */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); /* setup the wptr shadow polling */ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), lower_32_bits(wptr_gpu_addr)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); /* set the wb address whether it's enabled or not */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), + ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, 
mmSDMA0_GFX_RB_BASE_HI), + ring->gpu_addr >> 40); ring->wptr = 0; /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr) << 2); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr) << 2); } - doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_DOORBELL_OFFSET)); if (ring->use_doorbell) { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); @@ -680,8 +798,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) } else { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); } - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), + doorbell_offset); adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, 20); @@ -690,28 +809,30 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) sdma_v5_0_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_0_get_reg_offset(adev, i, 
mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14)); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + } if (!amdgpu_sriov_vf(adev)) { /* unhalt engine */ @@ -722,15 +843,15 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif /* enable DMA IBs */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); ring->sched.ready = true; @@ -739,11 +860,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) sdma_v5_0_enable(adev, true); } - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) return r; - } if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, true); @@ -834,10 +953,6 @@ static int sdma_v5_0_start(struct amdgpu_device *adev) r = sdma_v5_0_load_microcode(adev); if (r) return r; - - /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */ - if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d) - msleep(1000); } /* unhalt the MEs */ @@ -907,16 +1022,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) udelay(1); } - if (i < adev->usec_timeout) { - if (amdgpu_emu_mode == 1) - DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i); - else - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); - } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); - r = -EINVAL; - } + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + amdgpu_device_wb_free(adev, index); return r; @@ -926,6 +1034,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) * sdma_v5_0_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (NAVI10). * Returns 0 on success, error on failure. 
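The ring-test rewrite above drops the per-ring DRM_INFO/DRM_ERROR chatter and simply returns -ETIMEDOUT, leaving error reporting to the callers. The underlying pattern is unchanged: seed a writeback slot with 0xCAFEDEAD, emit a packet that stores 0xDEADBEEF there, then poll until the value flips or the timeout expires. A sketch of the poll loop; wb_read() and usec_delay() are hypothetical stand-ins for le32_to_cpu(adev->wb.wb[index]) and udelay(1) in the driver:

#include <errno.h>
#include <stdint.h>

static int poll_ring_test(uint32_t (*wb_read)(void),
                          void (*usec_delay)(void),
                          unsigned int usec_timeout)
{
        unsigned int i;

        for (i = 0; i < usec_timeout; i++) {
                if (wb_read() == 0xDEADBEEF)
                        return 0;      /* the test packet executed */
                usec_delay();
        }
        return -ETIMEDOUT;             /* matches the new error path */
}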
@@ -950,7 +1059,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) { DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; @@ -981,13 +1091,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); - if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + if (tmp == 0xDEADBEEF) r = 0; - } else { - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + else r = -EINVAL; - } err1: amdgpu_ib_free(adev, &ib, NULL); @@ -1030,10 +1137,9 @@ static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (NAVI10). */ @@ -1086,10 +1192,11 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw - * + * sdma_v5_0_ring_pad_ib - pad the IB + * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding * + * Pad the IB with NOPs to a boundary multiple of 8. */ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { @@ -1097,7 +1204,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib u32 pad_count; int i; - pad_count = (8 - (ib->length_dw & 0x7)) % 8; + pad_count = (-ib->length_dw) & 0x7; for (i = 0; i < pad_count; i++) if (sdma && sdma->burst_nop && (i == 0)) ib->ptr[ib->length_dw++] = @@ -1139,7 +1246,8 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (NAVI10). @@ -1187,8 +1295,6 @@ static int sdma_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->sdma.num_instances = 2; - sdma_v5_0_set_ring_funcs(adev); sdma_v5_0_set_buffer_funcs(adev); sdma_v5_0_set_vm_pte_funcs(adev); @@ -1229,7 +1335,7 @@ static int sdma_v5_0_sw_init(void *handle) ring->ring_obj = NULL; ring->use_doorbell = true; - DRM_INFO("use_doorbell being set to: [%s]\n", + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, ring->use_doorbell?"true":"false"); ring->doorbell_index = (i == 0) ? @@ -1237,11 +1343,10 @@ static int sdma_v5_0_sw_init(void *handle) : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + (i == 0) ? 
AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -1254,8 +1359,12 @@ static int sdma_v5_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + } return 0; } @@ -1392,14 +1501,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev, { u32 sdma_cntl; - u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? - sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : - sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); + if (!amdgpu_sriov_vf(adev)) { + u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? + sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : + sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); - sdma_cntl = RREG32(reg_offset); - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + } return 0; } @@ -1523,14 +1634,14 @@ static int sdma_v5_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): sdma_v5_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v5_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -1600,8 +1711,9 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, + .emit_mem_sync = sdma_v5_0_ring_emit_mem_sync, .emit_fence = sdma_v5_0_ring_emit_fence, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush, @@ -1648,10 +1760,11 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev) /** * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: if a secure copy should be used * * Copy GPU buffers using the DMA engine (NAVI10). * Used by the amdgpu ttm implementation to move pages if @@ -1660,10 +1773,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 
1 : 0); ib->ptr[ib->length_dw++] = byte_count - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); @@ -1675,7 +1790,7 @@ static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib, /** * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to fill * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -1721,17 +1836,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = { static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h index d5a94e3d181c..d4e3c2e696f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h @@ -24,21 +24,6 @@ #ifndef __SDMA_V5_0_H__ #define __SDMA_V5_0_H__ -enum sdma_v5_0_utcl2_cache_read_policy { - CACHE_READ_POLICY_L2__LRU = 0x00000000, - CACHE_READ_POLICY_L2__STREAM = 0x00000001, - CACHE_READ_POLICY_L2__NOA = 0x00000002, - CACHE_READ_POLICY_L2__DEFAULT = CACHE_READ_POLICY_L2__NOA, -}; - -enum sdma_v5_0_utcl2_cache_write_policy { - CACHE_WRITE_POLICY_L2__LRU = 0x00000000, - CACHE_WRITE_POLICY_L2__STREAM = 0x00000001, - CACHE_WRITE_POLICY_L2__NOA = 0x00000002, - CACHE_WRITE_POLICY_L2__BYPASS = 0x00000003, - CACHE_WRITE_POLICY_L2__DEFAULT = CACHE_WRITE_POLICY_L2__BYPASS, -}; - extern const struct amd_ip_funcs sdma_v5_0_ip_funcs; extern const struct amdgpu_ip_block_version sdma_v5_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c new file mode 100644 index 000000000000..4d4d1aa51b8a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -0,0 +1,1825 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
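The sdma_v5_0_ring_pad_ib() change above swaps (8 - (len & 7)) % 8 for the branch-free (-len) & 7. For unsigned len the two are identical, because -len ≡ 8 - (len mod 8) (mod 8); an exhaustive check over a few periods, illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t len;

        /* unary minus on uint32_t wraps modulo 2^32, so & 0x7 is mod 8 */
        for (len = 0; len < 32; len++)
                assert(((8 - (len & 0x7)) % 8) == ((-len) & 0x7));
        return 0;
}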
+ * + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" + +#include "gc/gc_10_3_0_offset.h" +#include "gc/gc_10_3_0_sh_mask.h" +#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h" +#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h" +#include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h" +#include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h" + +#include "soc15_common.h" +#include "soc15.h" +#include "navi10_sdma_pkt_open.h" +#include "nbio_v2_3.h" +#include "sdma_common.h" +#include "sdma_v5_2.h" + +MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin"); +MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin"); +MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin"); + +MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin"); + +#define SDMA1_REG_OFFSET 0x600 +#define SDMA3_REG_OFFSET 0x400 +#define SDMA0_HYP_DEC_REG_START 0x5880 +#define SDMA0_HYP_DEC_REG_END 0x5893 +#define SDMA1_HYP_DEC_REG_OFFSET 0x20 + +static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev); +static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev); +static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev); +static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev); + +static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) +{ + u32 base; + + if (internal_offset >= SDMA0_HYP_DEC_REG_START && + internal_offset <= SDMA0_HYP_DEC_REG_END) { + base = adev->reg_offset[GC_HWIP][0][1]; + if (instance != 0) + internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance; + } else { + if (instance < 2) { + base = adev->reg_offset[GC_HWIP][0][0]; + if (instance == 1) + internal_offset += SDMA1_REG_OFFSET; + } else { + base = adev->reg_offset[GC_HWIP][0][2]; + if (instance == 3) + internal_offset += SDMA3_REG_OFFSET; + } + } + + return base + internal_offset; +} + +static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) +{ + int err = 0; + const struct sdma_firmware_header_v1_0 *hdr; + + err = amdgpu_ucode_validate(sdma_inst->fw); + if (err) + return err; + + hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data; + sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version); + sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version); + + if (sdma_inst->feature_version >= 20) + sdma_inst->burst_nop = true; + + return 0; +} + +static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev) +{ + release_firmware(adev->sdma.instance[0].fw); + + memset((void *)adev->sdma.instance, 0, + sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES); +} + +/** + * sdma_v5_2_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. 
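sdma_v5_2_get_reg_offset() above encodes the v5.2 register layout: instances 0/1 sit in the first GC aperture (instance 1 at +SDMA1_REG_OFFSET), instances 2/3 in a second aperture (instance 3 at +SDMA3_REG_OFFSET), while the HYP_DEC window uses a per-instance stride instead. A table-driven equivalent of the non-HYP path makes the banking explicit; this is a sketch assuming instance < 4, with aperture[] standing in for adev->reg_offset[GC_HWIP][0]:

#include <stdint.h>

static uint32_t reg_offset_banked(const uint32_t *aperture,
                                  uint32_t instance, uint32_t internal)
{
        /* (aperture index, extra offset) per instance, mirroring the
         * switch/if ladder above */
        static const struct { uint32_t bank; uint32_t extra; } map[4] = {
                { 0, 0x000 },   /* SDMA0 */
                { 0, 0x600 },   /* SDMA1: +SDMA1_REG_OFFSET */
                { 2, 0x000 },   /* SDMA2: second aperture */
                { 2, 0x400 },   /* SDMA3: +SDMA3_REG_OFFSET */
        };

        return aperture[map[instance].bank] + map[instance].extra + internal;
}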
+ */ + +// emulation only, won't work on real chip +// navi10 real chip need to use PSP to load firmware +static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[40]; + int err = 0, i; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + + DRM_DEBUG("\n"); + + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 2, 0): + chip_name = "sienna_cichlid"; + break; + case IP_VERSION(5, 2, 2): + chip_name = "navy_flounder"; + break; + case IP_VERSION(5, 2, 1): + chip_name = "vangogh"; + break; + case IP_VERSION(5, 2, 4): + chip_name = "dimgrey_cavefish"; + break; + case IP_VERSION(5, 2, 5): + chip_name = "beige_goby"; + break; + case IP_VERSION(5, 2, 3): + chip_name = "yellow_carp"; + break; + default: + BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); + + err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev); + if (err) + goto out; + + err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]); + if (err) + goto out; + + for (i = 1; i < adev->sdma.num_instances; i++) + memcpy((void *)&adev->sdma.instance[i], + (void *)&adev->sdma.instance[0], + sizeof(struct amdgpu_sdma_instance)); + + if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0))) + return 0; + + DRM_DEBUG("psp_load == '%s'\n", + adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + for (i = 0; i < adev->sdma.num_instances; i++) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; + info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; + info->fw = adev->sdma.instance[i].fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + } + +out: + if (err) { + DRM_ERROR("sdma_v5_2: Failed to load firmware \"%s\"\n", fw_name); + sdma_v5_2_destroy_inst_ctx(adev); + } + return err; +} + +static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring) +{ + unsigned ret; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE)); + amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, 1); + ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */ + amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ + + return ret; +} + +static void sdma_v5_2_ring_patch_cond_exec(struct amdgpu_ring *ring, + unsigned offset) +{ + unsigned cur; + + BUG_ON(offset > ring->buf_mask); + BUG_ON(ring->ring[offset] != 0x55aa55aa); + + cur = (ring->wptr - 1) & ring->buf_mask; + if (cur > offset) + ring->ring[offset] = cur - offset; + else + ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; +} + +/** + * sdma_v5_2_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware (NAVI10+). + */ +static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring) +{ + u64 *rptr; + + /* XXX check if swapping is necessary on BE */ + rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]); + + DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); + return ((*rptr) >> 2); +} + +/** + * sdma_v5_2_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware (NAVI10+). 
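The cond_exec pair above reserves a COND_EXE packet whose final dword is patched later with the number of dwords to skip, i.e. the forward ring distance from the placeholder to the current write position. For cur != offset, the if/else in sdma_v5_2_ring_patch_cond_exec() reduces to one masked subtraction on a power-of-two ring (ring size = buf_mask + 1); a sketch:

#include <stdint.h>

/* Dwords between the placeholder slot and 'cur', walking forward
 * around a power-of-two ring. */
static uint32_t cond_exec_skip_count(uint32_t offset, uint32_t cur,
                                     uint32_t buf_mask)
{
        return (cur - offset) & buf_mask;
}

When cur > offset this is cur - offset; when cur < offset the unsigned wraparound yields (buf_mask + 1) - offset + cur, exactly the two branches above.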
+ */ +static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); + } else { + wptr = RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = wptr << 32; + wptr |= RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); + } + + return wptr >> 2; +} + +/** + * sdma_v5_2_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware (NAVI10+). + */ +static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + DRM_DEBUG("Setting write pointer\n"); + if (ring->use_doorbell) { + DRM_DEBUG("Using doorbell -- " + "wptr_offs == 0x%08x " + "lower_32_bits(ring->wptr) << 2 == 0x%08x " + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + /* XXX check if swapping is necessary on BE */ + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2); + adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2); + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", + ring->doorbell_index, ring->wptr << 2); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + DRM_DEBUG("Not using doorbell -- " + "mmSDMA%i_GFX_RB_WPTR == 0x%08x " + "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); + } +} + +static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->funcs->nop | + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->funcs->nop); +} + +/** + * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * @flags: unused + * + * Schedule an IB in the DMA ring. + */ +static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); + + /* An IB packet must end on a 8 DW boundary--the next dword + * must be on a 8-dword boundary. Our IB packet below is 6 + * dwords long, thus add x number of NOPs, such that, in + * modular arithmetic, + * wptr + 6 + x = 8k, k >= 0, which in C is, + * (wptr + 6 + x) % 8 = 0. + * The expression below, is a solution of x. 
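A note on the units in get_wptr/set_wptr: the software pointer is kept in ring dwords while the writeback slot, the doorbell and the RB_WPTR/RB_WPTR_HI registers carry a byte-granular value, hence the paired << 2 and >> 2. The non-doorbell read path composes the HI/LO register pair first; equivalently, as an illustrative sketch:

#include <stdint.h>

/* Compose the 64-bit byte-granular wptr from the HI/LO register pair
 * and convert to ring dwords, mirroring the non-doorbell path. */
static uint64_t wptr_from_regs(uint32_t lo, uint32_t hi)
{
        uint64_t wptr = ((uint64_t)hi << 32) | lo;

        return wptr >> 2; /* registers hold bytes, ring->wptr counts dwords */
}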
+ */ + sdma_v5_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); + amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); +} + +/** + * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * + * flush the IB by graphics cache rinse. + */ +static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring) +{ + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV | + SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | + SDMA_GCR_GLI_INV(1); + + /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | + SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | + SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | + SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); +} + +/** + * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. + */ +static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 ref_and_mask = 0; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; + + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ +} + +/** + * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed. 
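In sdma_v5_2_ring_emit_fence() below, a 64-bit fence lands as two independent 32-bit stores — low word at addr, high word at addr + 4 — each carried by its own FENCE packet. A CPU-side reader would compose them as sketched here; this assumes little-endian layout and that both stores have completed, since a torn read is possible while the engine is between the two packets:

#include <stdint.h>

static uint64_t read_fence_seq64(const volatile uint32_t *fence_cpu_addr)
{
        uint64_t lo = fence_cpu_addr[0]; /* written by the first FENCE packet */
        uint64_t hi = fence_cpu_addr[1]; /* written at addr + 4 when 64-bit */

        return (hi << 32) | lo;
}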
+ */ +static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) | + SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */ + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bit) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) | + SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); + } +} + + +/** + * sdma_v5_2_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers. + */ +static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring; + struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring; + struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring; + struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring; + u32 rb_cntl, ib_cntl; + int i; + + if ((adev->mman.buffer_funcs_ring == sdma0) || + (adev->mman.buffer_funcs_ring == sdma1) || + (adev->mman.buffer_funcs_ring == sdma2) || + (adev->mman.buffer_funcs_ring == sdma3)) + amdgpu_ttm_set_buffer_funcs_status(adev, false); + + for (i = 0; i < adev->sdma.num_instances; i++) { + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + } +} + +/** + * sdma_v5_2_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues. + */ +static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch. 
+ */ +static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl, phase_quantum = 0; + int i; + + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + } + +} + +/** + * sdma_v5_2_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines. + */ +static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + if (!enable) { + sdma_v5_2_gfx_stop(adev); + sdma_v5_2_rlc_stop(adev); + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + } +} + +/** + * sdma_v5_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. 
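An aside on the quantum encoding in sdma_v5_2_ctx_switch_enable() above: amdgpu_sdma_phase_quantum is a raw cycle count that has to be squeezed into a small VALUE/UNIT mantissa-exponent pair, halving with round-up until it fits. A standalone sketch of that loop; the field widths below are placeholders, the real masks come from the SDMA0_PHASE0_QUANTUM register definition:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field limits, standing in for the VALUE/UNIT register masks. */
#define VALUE_MAX 0xFFu
#define UNIT_MAX  0xFu

/* Encode a cycle count as value << unit, clamping as the driver does. */
static uint32_t encode_quantum(uint32_t cycles, uint32_t *value, uint32_t *unit)
{
    *value = cycles;
    *unit = 0;
    while (*value > VALUE_MAX) {
        *value = (*value + 1) >> 1;   /* halve, rounding up */
        *unit += 1;
    }
    if (*unit > UNIT_MAX) {           /* clamp, as the WARN_ONCE path does */
        *value = VALUE_MAX;
        *unit = UNIT_MAX;
    }
    return *value << *unit;           /* approximate cycles actually programmed */
}

int main(void)
{
    uint32_t v, u;
    uint32_t got = encode_quantum(100000, &v, &u);

    printf("value=%u unit=%u -> ~%u cycles\n", v, u, got);
    return 0;
}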
+ */
+static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
+{
+    struct amdgpu_ring *ring;
+    u32 rb_cntl, ib_cntl;
+    u32 rb_bufsz;
+    u32 wb_offset;
+    u32 doorbell;
+    u32 doorbell_offset;
+    u32 temp;
+    u32 wptr_poll_cntl;
+    u64 wptr_gpu_addr;
+    int i, r;
+
+    for (i = 0; i < adev->sdma.num_instances; i++) {
+        ring = &adev->sdma.instance[i].ring;
+        wb_offset = (ring->rptr_offs * 4);
+
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+
+        /* Set ring buffer size in dwords */
+        rb_bufsz = order_base_2(ring->ring_size / 4);
+        rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+                                RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+        /* Initialize the ring buffer's read and write pointers */
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+
+        /* setup the wptr shadow polling */
+        wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+                        lower_32_bits(wptr_gpu_addr));
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+                        upper_32_bits(wptr_gpu_addr));
+        wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
+                                         mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+        wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+                                       SDMA0_GFX_RB_WPTR_POLL_CNTL,
+                                       F32_POLL_ENABLE, 1);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+                        wptr_poll_cntl);
+
+        /* set the wb address whether it's enabled or not */
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+                        upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+                        lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+
+        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+
+        ring->wptr = 0;
+
+        /* before programming wptr to a smaller value, need to set minor_ptr_update first */
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+
+        if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+        }
+
+        doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+        doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
+
+        if (ring->use_doorbell) {
+            doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+            doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+                                            OFFSET, ring->doorbell_index);
+        } else {
+            doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+        }
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+
+        adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+                                              ring->doorbell_index,
+                                              adev->doorbell_index.sdma_doorbell_range);
+
+        if (amdgpu_sriov_vf(adev))
+            sdma_v5_2_ring_set_wptr(ring);
+
+        /* set minor_ptr_update to 0 after wptr programmed */
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+
+        /* set utc l1 enable flag always to 1 */
+        temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+        temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+        /* enable MCBP */
+        temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+        WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+        /* Set up RESP_MODE to non-copy addresses */
+        temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+        temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+        /* program default cache read and write policy */
+        temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+        /* clean read policy and write policy bits */
+        temp &= 0xFF0FFF;
+        temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+                 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+                 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+
+        if (!amdgpu_sriov_vf(adev)) {
+            /* unhalt engine */
+            temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+            temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
+        }
+
+        /* enable DMA RB */
+        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+        ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+        /* enable DMA IBs */
+        WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+
+        ring->sched.ready = true;
+
+        if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+            sdma_v5_2_ctx_switch_enable(adev, true);
+            sdma_v5_2_enable(adev, true);
+        }
+
+        r = amdgpu_ring_test_ring(ring);
+        if (r) {
+            ring->sched.ready = false;
+            return r;
+        }
+
+        if (adev->mman.buffer_funcs_ring == ring)
+            amdgpu_ttm_set_buffer_funcs_status(adev, true);
+    }
+
+    return 0;
+}
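sdma_v5_2_gfx_resume() above programs RB_SIZE as the base-2 order of the ring size in dwords. A small sketch of what the kernel's order_base_2() helper computes, run on a hypothetical ring size:

#include <stdio.h>

/* Rounds up to the next power-of-two exponent, like the kernel helper. */
static unsigned int order_base_2(unsigned long n)
{
    unsigned int order = 0;

    while ((1UL << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned long ring_size = 1024 * 1024; /* bytes, hypothetical */
    unsigned int rb_bufsz = order_base_2(ring_size / 4);

    printf("ring of %lu bytes -> RB_SIZE field %u (2^%u dwords)\n",
           ring_size, rb_bufsz, rb_bufsz);
    return 0;
}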
+
+/**
+ * sdma_v5_2_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
+{
+    return 0;
+}
+
+/**
+ * sdma_v5_2_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1/2/3 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
+{
+    const struct sdma_firmware_header_v1_0 *hdr;
+    const __le32 *fw_data;
+    u32 fw_size;
+    int i, j;
+
+    /* halt the MEs */
+    sdma_v5_2_enable(adev, false);
+
+    for (i = 0; i < adev->sdma.num_instances; i++) {
+        if (!adev->sdma.instance[i].fw)
+            return -EINVAL;
+
+        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+        amdgpu_ucode_print_sdma_hdr(&hdr->header);
+        fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+
+        fw_data = (const __le32 *)
+            (adev->sdma.instance[i].fw->data +
+             le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+        WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+
+        for (j = 0; j < fw_size; j++) {
+            if (amdgpu_emu_mode == 1 && j % 500 == 0)
+                msleep(1);
+            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+        }
+
+        WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+    }
+
+    return 0;
+}
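sdma_v5_2_load_microcode() above pulls the dword count and payload offset out of the firmware header before streaming each dword to the UCODE_DATA port. A simplified userspace sketch of that parsing; the two-field header here is a stand-in for the real struct sdma_firmware_header_v1_0 in amdgpu_ucode.h, which carries more fields and is little-endian on disk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the common firmware header. */
struct fw_header {
    uint32_t ucode_size_bytes;
    uint32_t ucode_array_offset_bytes;
};

int main(void)
{
    uint8_t blob[16];
    struct fw_header hdr = { 8, sizeof(struct fw_header) };
    uint32_t payload[2] = { 0x11111111, 0x22222222 };

    /* Build a fake 16-byte firmware blob: header, then two ucode dwords. */
    memcpy(blob, &hdr, sizeof(hdr));
    memcpy(blob + sizeof(hdr), payload, sizeof(payload));

    /* What load_microcode does: locate the dword stream and its count,
     * then feed each dword to the UCODE_DATA register. */
    memcpy(&hdr, blob, sizeof(hdr));
    uint32_t fw_size = hdr.ucode_size_bytes / 4;
    for (uint32_t j = 0; j < fw_size; j++) {
        uint32_t dw;

        memcpy(&dw, blob + hdr.ucode_array_offset_bytes + 4 * j, 4);
        printf("WREG32(UCODE_DATA, 0x%08x)\n", dw);
    }
    return 0;
}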
+
+static int sdma_v5_2_soft_reset(void *handle)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    u32 grbm_soft_reset;
+    u32 tmp;
+    int i;
+
+    for (i = 0; i < adev->sdma.num_instances; i++) {
+        grbm_soft_reset = REG_SET_FIELD(0,
+                                        GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+                                        1);
+        grbm_soft_reset <<= i;
+
+        tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+        tmp |= grbm_soft_reset;
+        DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+        WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+        tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+        udelay(50);
+
+        tmp &= ~grbm_soft_reset;
+        WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+        tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+        udelay(50);
+    }
+
+    return 0;
+}
+
+/**
+ * sdma_v5_2_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_2_start(struct amdgpu_device *adev)
+{
+    int r = 0;
+
+    if (amdgpu_sriov_vf(adev)) {
+        sdma_v5_2_ctx_switch_enable(adev, false);
+        sdma_v5_2_enable(adev, false);
+
+        /* set RB registers */
+        r = sdma_v5_2_gfx_resume(adev);
+        return r;
+    }
+
+    if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+        r = sdma_v5_2_load_microcode(adev);
+        if (r)
+            return r;
+
+        /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
+        if (amdgpu_emu_mode == 1)
+            msleep(1000);
+    }
+
+    /* TODO: check whether we can submit a doorbell request to raise
+     * a doorbell fence to exit gfxoff.
+     */
+    if (adev->in_s0ix)
+        amdgpu_gfx_off_ctrl(adev, false);
+
+    sdma_v5_2_soft_reset(adev);
+    /* unhalt the MEs */
+    sdma_v5_2_enable(adev, true);
+    /* enable sdma ring preemption */
+    sdma_v5_2_ctx_switch_enable(adev, true);
+
+    /* start the gfx rings and rlc compute queues */
+    r = sdma_v5_2_gfx_resume(adev);
+    if (adev->in_s0ix)
+        amdgpu_gfx_off_ctrl(adev, true);
+    if (r)
+        return r;
+    r = sdma_v5_2_rlc_resume(adev);
+
+    return r;
+}
+
+/**
+ * sdma_v5_2_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to memory.
+ * Returns 0 for success, error for failure.
+ */ +static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_alloc(ring, 5); + if (r) { + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_device_wb_free(adev, index); + return r; + } + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + if (amdgpu_emu_mode == 1) + msleep(1); + else + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + amdgpu_device_wb_free(adev, index); + + return r; +} + +/** + * sdma_v5_2_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Test a simple IB in the DMA ring. + * Returns 0 on success, error on failure. + */ +static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct dma_fence *f = NULL; + unsigned index; + long r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; + } + + ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0); + ib.ptr[4] = 0xDEADBEEF; + ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.length_dw = 8; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (r) + goto err1; + + r = dma_fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; + +err1: + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err0: + amdgpu_device_wb_free(adev, index); + return r; +} + + +/** + * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA. 
+ */
+static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
+                                  uint64_t pe, uint64_t src,
+                                  unsigned count)
+{
+    unsigned bytes = count * 8;
+
+    ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+        SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+    ib->ptr[ib->length_dw++] = bytes - 1;
+    ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+    ib->ptr[ib->length_dw++] = lower_32_bits(src);
+    ib->ptr[ib->length_dw++] = upper_32_bits(src);
+    ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+    ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+}
+
+/**
+ * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using sDMA.
+ */
+static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                                   uint64_t value, unsigned count,
+                                   uint32_t incr)
+{
+    unsigned ndw = count * 2;
+
+    ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+        SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+    ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+    ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+    ib->ptr[ib->length_dw++] = ndw - 1;
+    for (; ndw > 0; ndw -= 2) {
+        ib->ptr[ib->length_dw++] = lower_32_bits(value);
+        ib->ptr[ib->length_dw++] = upper_32_bits(value);
+        value += incr;
+    }
+}
+
+/**
+ * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA.
+ */
+static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
+                                     uint64_t pe,
+                                     uint64_t addr, unsigned count,
+                                     uint32_t incr, uint64_t flags)
+{
+    /* for physically contiguous pages (vram) */
+    ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
+    ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+    ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+    ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
+    ib->ptr[ib->length_dw++] = upper_32_bits(flags);
+    ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+    ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+    ib->ptr[ib->length_dw++] = incr; /* increment size */
+    ib->ptr[ib->length_dw++] = 0;
+    ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
+}
+
+/**
+ * sdma_v5_2_ring_pad_ib - pad the IB
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOPs to a boundary multiple of 8.
+ */
+static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+    struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+    u32 pad_count;
+    int i;
+
+    pad_count = (-ib->length_dw) & 0x7;
+    for (i = 0; i < pad_count; i++)
+        if (sdma && sdma->burst_nop && (i == 0))
+            ib->ptr[ib->length_dw++] =
+                SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+        else
+            ib->ptr[ib->length_dw++] =
+                SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+
+/**
+ * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed.
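To see what sdma_v5_2_vm_write_pte() above actually emits, here is the same dword accounting run on sample values in userspace; the header word is a placeholder of ours rather than the real WRITE_LINEAR encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 'count' 64-bit PTEs become ndw = count * 2 data dwords after a
     * 4-dword header, each PTE value stepping by 'incr' bytes. */
    uint32_t ib[32];
    unsigned length_dw = 0;
    uint64_t pe = 0x100000, value = 0x800000, incr = 0x1000;
    unsigned count = 3, ndw = count * 2;

    ib[length_dw++] = 0;                     /* WRITE_LINEAR header (placeholder) */
    ib[length_dw++] = (uint32_t)pe;          /* dst addr, low  */
    ib[length_dw++] = (uint32_t)(pe >> 32);  /* dst addr, high */
    ib[length_dw++] = ndw - 1;               /* data dword count - 1 */
    for (; ndw > 0; ndw -= 2) {
        ib[length_dw++] = (uint32_t)value;
        ib[length_dw++] = (uint32_t)(value >> 32);
        value += incr;
    }

    for (unsigned i = 0; i < length_dw; i++)
        printf("ib[%u] = 0x%08x\n", i, ib[i]);
    return 0;
}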
+ */ +static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + /* wait for idle */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ + SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); /* reference */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ +} + + +/** + * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vmid: vmid number to use + * @pd_addr: address + * + * Update the page table base and flush the VM TLB + * using sDMA. + */ +static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); +} + +static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, val); +} + +static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); /* reference */ + amdgpu_ring_write(ring, mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); +} + +static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + amdgpu_ring_emit_wreg(ring, reg0, ref); + /* wait for a cycle to reset vm_inv_eng*_ack */ + amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0); + amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); +} + +static int sdma_v5_2_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + sdma_v5_2_set_ring_funcs(adev); + sdma_v5_2_set_buffer_funcs(adev); + sdma_v5_2_set_vm_pte_funcs(adev); + sdma_v5_2_set_irq_funcs(adev); + + return 0; +} + +static unsigned sdma_v5_2_seq_to_irq_id(int seq_num) +{ + switch (seq_num) { + case 0: + return SOC15_IH_CLIENTID_SDMA0; + case 1: + return SOC15_IH_CLIENTID_SDMA1; + case 2: + return SOC15_IH_CLIENTID_SDMA2; + case 3: + return SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid; + default: + break; + } + return -EINVAL; +} + +static unsigned sdma_v5_2_seq_to_trap_id(int seq_num) +{ + switch (seq_num) { + case 0: + return SDMA0_5_0__SRCID__SDMA_TRAP; + case 1: + return SDMA1_5_0__SRCID__SDMA_TRAP; + case 2: + return SDMA2_5_0__SRCID__SDMA_TRAP; + case 3: + return SDMA3_5_0__SRCID__SDMA_TRAP; + default: + break; + } + return -EINVAL; +} + +static int sdma_v5_2_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* SDMA trap event */ + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v5_2_seq_to_irq_id(i), + sdma_v5_2_seq_to_trap_id(i), + 
&adev->sdma.trap_irq); + if (r) + return r; + } + + r = sdma_v5_2_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->me = i; + + DRM_INFO("use_doorbell being set to: [%s]\n", + ring->use_doorbell?"true":"false"); + + ring->doorbell_index = + (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset + + sprintf(ring->name, "sdma%d", i); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + } + + return r; +} + +static int sdma_v5_2_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + + sdma_v5_2_destroy_inst_ctx(adev); + + return 0; +} + +static int sdma_v5_2_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = sdma_v5_2_start(adev); + + return r; +} + +static int sdma_v5_2_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + return 0; + + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); + + return 0; +} + +static int sdma_v5_2_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return sdma_v5_2_hw_fini(adev); +} + +static int sdma_v5_2_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return sdma_v5_2_hw_init(adev); +} + +static bool sdma_v5_2_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + u32 tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS_REG)); + + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) + return false; + } + + return true; +} + +static int sdma_v5_2_wait_for_idle(void *handle) +{ + unsigned i; + u32 sdma0, sdma1, sdma2, sdma3; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); + sdma1 = RREG32(sdma_v5_2_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG)); + sdma2 = RREG32(sdma_v5_2_get_reg_offset(adev, 2, mmSDMA0_STATUS_REG)); + sdma3 = RREG32(sdma_v5_2_get_reg_offset(adev, 3, mmSDMA0_STATUS_REG)); + + if (sdma0 & sdma1 & sdma2 & sdma3 & SDMA0_STATUS_REG__IDLE_MASK) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring) +{ + int i, r = 0; + struct amdgpu_device *adev = ring->adev; + u32 index = 0; + u64 sdma_gfx_preempt; + + amdgpu_sdma_get_index_from_ring(ring, &index); + sdma_gfx_preempt = + sdma_v5_2_get_reg_offset(adev, index, mmSDMA0_GFX_PREEMPT); + + /* assert preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, false); + + /* emit the trailing fence */ + ring->trail_seq += 1; + amdgpu_ring_alloc(ring, 10); + sdma_v5_2_ring_emit_fence(ring, ring->trail_fence_gpu_addr, + ring->trail_seq, 0); + amdgpu_ring_commit(ring); + + /* assert IB preemption */ + WREG32(sdma_gfx_preempt, 1); + + /* poll the trailing fence */ + for (i = 0; i < adev->usec_timeout; i++) { + if (ring->trail_seq == + le32_to_cpu(*(ring->trail_fence_cpu_addr))) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) { + r = 
-EINVAL; + DRM_ERROR("ring %d failed to be preempted\n", ring->idx); + } + + /* deassert IB preemption */ + WREG32(sdma_gfx_preempt, 0); + + /* deassert the preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, true); + return r; +} + +static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL); + + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + + return 0; +} + +static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: SDMA trap\n"); + switch (entry->client_id) { + case SOC15_IH_CLIENTID_SDMA0: + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[0].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + case 3: + /* XXX page queue*/ + break; + } + break; + case SOC15_IH_CLIENTID_SDMA1: + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[1].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + case 3: + /* XXX page queue*/ + break; + } + break; + case SOC15_IH_CLIENTID_SDMA2: + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[2].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + case 3: + /* XXX page queue*/ + break; + } + break; + case SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid: + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->sdma.instance[3].ring); + break; + case 1: + /* XXX compute */ + break; + case 2: + /* XXX compute */ + break; + case 3: + /* XXX page queue*/ + break; + } + break; + } + return 0; +} + +static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + return 0; +} + +static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + + if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) + adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { + /* Enable sdma clock gating */ + def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL)); + data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK); + if (def != data) + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data); + } else { + /* Disable sdma clock gating */ + def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL)); + data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK | + SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK); + if (def != data) + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data); + } + } +} + +static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device 
*adev, + bool enable) +{ + uint32_t data, def; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + + if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) + adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { + /* Enable sdma mem light sleep */ + def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL)); + data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data); + + } else { + /* Disable sdma mem light sleep */ + def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL)); + data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK; + if (def != data) + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data); + + } + } +} + +static int sdma_v5_2_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 1): + case IP_VERSION(5, 2, 4): + case IP_VERSION(5, 2, 5): + case IP_VERSION(5, 2, 3): + sdma_v5_2_update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); + sdma_v5_2_update_medium_grain_light_sleep(adev, + state == AMD_CG_STATE_GATE); + break; + default: + break; + } + + return 0; +} + +static int sdma_v5_2_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static void sdma_v5_2_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + if (amdgpu_sriov_vf(adev)) + *flags = 0; + + /* AMD_CG_SUPPORT_SDMA_LS */ + data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL)); + if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK) + *flags |= AMD_CG_SUPPORT_SDMA_LS; +} + +const struct amd_ip_funcs sdma_v5_2_ip_funcs = { + .name = "sdma_v5_2", + .early_init = sdma_v5_2_early_init, + .late_init = NULL, + .sw_init = sdma_v5_2_sw_init, + .sw_fini = sdma_v5_2_sw_fini, + .hw_init = sdma_v5_2_hw_init, + .hw_fini = sdma_v5_2_hw_fini, + .suspend = sdma_v5_2_suspend, + .resume = sdma_v5_2_resume, + .is_idle = sdma_v5_2_is_idle, + .wait_for_idle = sdma_v5_2_wait_for_idle, + .soft_reset = sdma_v5_2_soft_reset, + .set_clockgating_state = sdma_v5_2_set_clockgating_state, + .set_powergating_state = sdma_v5_2_set_powergating_state, + .get_clockgating_state = sdma_v5_2_get_clockgating_state, +}; + +static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_GFXHUB_0, + .get_rptr = sdma_v5_2_ring_get_rptr, + .get_wptr = sdma_v5_2_ring_get_wptr, + .set_wptr = sdma_v5_2_ring_set_wptr, + .emit_frame_size = + 5 + /* sdma_v5_2_ring_init_cond_exec */ + 6 + /* sdma_v5_2_ring_emit_hdp_flush */ + 3 + /* hdp_invalidate */ + 6 + /* sdma_v5_2_ring_emit_pipeline_sync */ + /* sdma_v5_2_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */ + .emit_ib = sdma_v5_2_ring_emit_ib, + .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync, + .emit_fence = sdma_v5_2_ring_emit_fence, + .emit_pipeline_sync = 
sdma_v5_2_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v5_2_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v5_2_ring_emit_hdp_flush, + .test_ring = sdma_v5_2_ring_test_ring, + .test_ib = sdma_v5_2_ring_test_ib, + .insert_nop = sdma_v5_2_ring_insert_nop, + .pad_ib = sdma_v5_2_ring_pad_ib, + .emit_wreg = sdma_v5_2_ring_emit_wreg, + .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait, + .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait, + .init_cond_exec = sdma_v5_2_ring_init_cond_exec, + .patch_cond_exec = sdma_v5_2_ring_patch_cond_exec, + .preempt_ib = sdma_v5_2_ring_preempt_ib, +}; + +static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs; + adev->sdma.instance[i].ring.me = i; + } +} + +static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs = { + .set = sdma_v5_2_set_trap_irq_state, + .process = sdma_v5_2_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs = { + .process = sdma_v5_2_process_illegal_inst_irq, +}; + +static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + + adev->sdma.num_instances; + adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs; + adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs; +} + +/** + * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ib: indirect buffer to copy to + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * @tmz: if a secure copy should be used + * + * Copy GPU buffers using the DMA engine. + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + bool tmz) +{ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); + ib->ptr[ib->length_dw++] = byte_count - 1; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); +} + +/** + * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ib: indirect buffer to fill + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine. 
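sdma_v5_2_emit_copy_buffer() above costs 7 dwords per packet and, per the sdma_v5_2_buffer_funcs table below, each packet moves at most 0x400000 bytes; larger copies get split into multiple packets by the TTM layer. A sketch of that sizing arithmetic, assuming a plain ceil-divide split (the actual accounting lives in amdgpu_ttm):

#include <stdint.h>
#include <stdio.h>

#define COPY_MAX_BYTES 0x400000u  /* per COPY_LINEAR packet */
#define COPY_NUM_DW    7u         /* dwords per packet */

int main(void)
{
    uint64_t byte_count = 10ull * 1024 * 1024; /* 10 MiB copy */
    uint64_t num_loops = (byte_count + COPY_MAX_BYTES - 1) / COPY_MAX_BYTES;
    uint64_t ib_dwords = num_loops * COPY_NUM_DW;

    printf("%llu bytes -> %llu packets, %llu IB dwords\n",
           (unsigned long long)byte_count,
           (unsigned long long)num_loops,
           (unsigned long long)ib_dwords);
    return 0;
}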
+ */ +static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = byte_count - 1; +} + +static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs = { + .copy_max_bytes = 0x400000, + .copy_num_dw = 7, + .emit_copy_buffer = sdma_v5_2_emit_copy_buffer, + + .fill_max_bytes = 0x400000, + .fill_num_dw = 5, + .emit_fill_buffer = sdma_v5_2_emit_fill_buffer, +}; + +static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev) +{ + if (adev->mman.buffer_funcs == NULL) { + adev->mman.buffer_funcs = &sdma_v5_2_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; + } +} + +static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = { + .copy_pte_num_dw = 7, + .copy_pte = sdma_v5_2_vm_copy_pte, + .write_pte = sdma_v5_2_vm_write_pte, + .set_pte_pde = sdma_v5_2_vm_set_pte_pde, +}; + +static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + unsigned i; + + if (adev->vm_manager.vm_pte_funcs == NULL) { + adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; + } + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; + } +} + +const struct amdgpu_ip_block_version sdma_v5_2_ip_block = { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 5, + .minor = 2, + .rev = 0, + .funcs = &sdma_v5_2_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h new file mode 100644 index 000000000000..b70414fef2a1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#ifndef __SDMA_V5_2_H__
+#define __SDMA_V5_2_H__
+
+extern const struct amd_ip_funcs sdma_v5_2_ip_funcs;
+extern const struct amdgpu_ip_block_version sdma_v5_2_ip_block;
+
+#endif /* __SDMA_V5_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f2d70a47a3af..e6d2f74a7976 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -26,6 +26,8 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+#include <drm/amdgpu_drm.h>
+
 #include "amdgpu.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_ih.h"
@@ -41,15 +43,19 @@
 #include "si_dma.h"
 #include "dce_v6_0.h"
 #include "si.h"
-#include "dce_virtual.h"
+#include "uvd_v3_1.h"
+#include "amdgpu_vkms.h"
 #include "gca/gfx_6_0_d.h"
 #include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
 #include "gmc/gmc_6_0_d.h"
 #include "dce/dce_6_0_d.h"
 #include "uvd/uvd_4_0_d.h"
 #include "bif/bif_3_0_d.h"
 #include "bif/bif_3_0_sh_mask.h"
+#include "amdgpu_dm.h"
+
 static const u32 tahiti_golden_registers[] =
 {
 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
@@ -901,6 +907,114 @@ static const u32 hainan_mgcg_cgcg_init[] =
 	0x3630, 0xfffffff0, 0x00000100,
 };
 
+/* XXX: update when we support VCE */
+#if 0
+/* tahiti, pitcairn, verde */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
+{
+    {
+        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+        .max_width = 2048,
+        .max_height = 1152,
+        .max_pixels_per_frame = 2048 * 1152,
+        .max_level = 0,
+    },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+    .codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
+    .codec_array = tahiti_video_codecs_encode_array,
+};
+#else
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+    .codec_count = 0,
+    .codec_array = NULL,
+};
+#endif
+/* oland and hainan don't support encode */
+static const struct amdgpu_video_codecs hainan_video_codecs_encode =
+{
+    .codec_count = 0,
+    .codec_array = NULL,
+};
+
+/* tahiti, pitcairn, verde, oland */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
+{
+    {
+        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+        .max_width = 2048,
+        .max_height = 1152,
+        .max_pixels_per_frame = 2048 * 1152,
+        .max_level = 3,
+    },
+    {
+        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+        .max_width = 2048,
+        .max_height = 1152,
+        .max_pixels_per_frame = 2048 * 1152,
+        .max_level = 5,
+    },
+    {
+        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+        .max_width = 2048,
+        .max_height = 1152,
+        .max_pixels_per_frame = 2048 * 1152,
+        .max_level = 41,
+    },
+    {
+        .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+        .max_width = 2048,
+        .max_height = 1152,
+        .max_pixels_per_frame = 2048 * 1152,
+        .max_level = 4,
+    },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_decode =
+{
+    .codec_count = ARRAY_SIZE(tahiti_video_codecs_decode_array),
+    .codec_array = tahiti_video_codecs_decode_array,
+};
+
+/* hainan doesn't support decode */
+static const struct amdgpu_video_codecs hainan_video_codecs_decode =
+{
+    .codec_count = 0,
+    .codec_array = NULL,
+};
+
+static int si_query_video_codecs(struct amdgpu_device *adev, bool encode,
+                                 const struct amdgpu_video_codecs **codecs)
+{
+    switch (adev->asic_type) {
+    case CHIP_VERDE:
+    case CHIP_TAHITI:
+    case CHIP_PITCAIRN:
+        if (encode)
+            *codecs = &tahiti_video_codecs_encode;
+        else
+            *codecs = &tahiti_video_codecs_decode;
+        return 0;
+    case CHIP_OLAND:
+        if (encode)
+            *codecs =
&hainan_video_codecs_encode; + else + *codecs = &tahiti_video_codecs_decode; + return 0; + case CHIP_HAINAN: + if (encode) + *codecs = &hainan_video_codecs_encode; + else + *codecs = &hainan_video_codecs_decode; + return 0; + default: + return -EINVAL; + } +} + static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags; @@ -973,6 +1087,28 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } +static u32 si_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + r = RREG32(mmUVD_CTX_DATA); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); + return r; +} + +static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); + WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); + WREG32(mmUVD_CTX_DATA, (v)); + spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); +} + static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {GRBM_STATUS}, {mmGRBM_STATUS2}, @@ -1191,18 +1327,124 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev, return true; } -//xxx: not implemented -static int si_asic_reset(struct amdgpu_device *adev) +static void si_set_clk_bypass_mode(struct amdgpu_device *adev) { - return 0; + u32 tmp, i; + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_BYPASS_EN; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp |= SPLL_CTLREQ_CHG; + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) + break; + udelay(1); + } + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + tmp = RREG32(MPLL_CNTL_MODE); + tmp &= ~MPLL_MCLK_SEL; + WREG32(MPLL_CNTL_MODE, tmp); +} + +static void si_spll_powerdown(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(SPLL_CNTL_MODE); + tmp |= SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_RESET; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_SLEEP; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(SPLL_CNTL_MODE); + tmp &= ~SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); +} + +static int si_gpu_pci_config_reset(struct amdgpu_device *adev) +{ + u32 i; + int r = -EINVAL; + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + /* set mclk/sclk to bypass */ + si_set_clk_bypass_mode(adev); + /* powerdown spll */ + si_spll_powerdown(adev); + /* disable BM */ + pci_clear_master(adev->pdev); + /* reset */ + amdgpu_device_pci_config_reset(adev); + + udelay(100); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + adev->has_hw_reset = true; + r = 0; + break; + } + udelay(1); + } + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + + return r; +} + +static bool si_asic_supports_baco(struct amdgpu_device *adev) +{ + return false; } static enum amd_reset_method si_asic_reset_method(struct amdgpu_device *adev) { + if (amdgpu_reset_method == AMD_RESET_METHOD_PCI) + return amdgpu_reset_method; + else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY && + amdgpu_reset_method != -1) + dev_warn(adev->dev, "Specified reset 
method:%d isn't supported, using AUTO instead.\n",
+			 amdgpu_reset_method);
+
 	return AMD_RESET_METHOD_LEGACY;
 }
 
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+    int r;
+
+    switch (si_asic_reset_method(adev)) {
+    case AMD_RESET_METHOD_PCI:
+        dev_info(adev->dev, "PCI reset\n");
+        r = amdgpu_device_pci_reset(adev);
+        break;
+    default:
+        dev_info(adev->dev, "PCI CONFIG reset\n");
+        r = si_gpu_pci_config_reset(adev);
+        break;
+    }
+
+    return r;
+}
+
 static u32 si_get_config_memsize(struct amdgpu_device *adev)
 {
 	return RREG32(mmCONFIG_MEMSIZE);
@@ -1213,7 +1455,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
 	uint32_t temp;
 
 	temp = RREG32(CONFIG_CNTL);
-	if (state == false) {
+	if (!state) {
 		temp &= ~(1<<0);
 		temp |= (1<<1);
 	} else {
@@ -1224,7 +1466,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
 
 static u32 si_get_xclk(struct amdgpu_device *adev)
 {
-	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 reference_clock = adev->clock.spll.reference_freq;
 	u32 tmp;
 
 	tmp = RREG32(CG_CLKPIN_CNTL_2);
@@ -1238,18 +1480,6 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
 	return reference_clock;
 }
 
-//xxx:not implemented
-static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
-{
-	return 0;
-}
-
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-	if (is_virtual_machine()) /* passthrough mode */
-		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg) {
@@ -1405,6 +1635,362 @@ static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
 	return (nak_r + nak_g);
 }
 
+static int si_uvd_send_upll_ctlreq(struct amdgpu_device *adev,
+                                   unsigned cg_upll_func_cntl)
+{
+    unsigned i;
+
+    /* Make sure UPLL_CTLREQ is deasserted */
+    WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+    mdelay(10);
+
+    /* Assert UPLL_CTLREQ */
+    WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+    /* Wait for CTLACK and CTLACK2 to get asserted */
+    for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
+        uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+
+        if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+            break;
+        mdelay(10);
+    }
+
+    /* Deassert UPLL_CTLREQ */
+    WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+    if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
+        DRM_ERROR("Timeout setting UVD clocks!\n");
+        return -ETIMEDOUT;
+    }
+
+    return 0;
+}
+
+static unsigned si_uvd_calc_upll_post_div(unsigned vco_freq,
+                                          unsigned target_freq,
+                                          unsigned pd_min,
+                                          unsigned pd_even)
+{
+    unsigned post_div = vco_freq / target_freq;
+
+    /* Adjust to post divider minimum value */
+    if (post_div < pd_min)
+        post_div = pd_min;
+
+    /* We always need a frequency less than or equal to the target */
+    if ((vco_freq / post_div) > target_freq)
+        post_div += 1;
+
+    /* Post dividers above a certain value must be even */
+    if (post_div > pd_even && post_div % 2)
+        post_div += 1;
+
+    return post_div;
+}
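The PLL sequences here lean on WREG32_P(reg, val, mask), amdgpu's read-modify-write helper: the bits set in mask are preserved from the current register value and val supplies the rest. A standalone sketch assuming that definition (tmp = (tmp & mask) | (val & ~mask)); the register bit chosen below is a placeholder:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xAABBCCDD;

static uint32_t RREG32(void) { return fake_reg; }
static void WREG32(uint32_t v) { fake_reg = v; }

/* Mimics WREG32_P(): keep the bits in 'mask', merge 'val' everywhere else. */
static void wreg32_p(uint32_t val, uint32_t mask)
{
    uint32_t tmp = RREG32();

    tmp &= mask;
    tmp |= (val & ~mask);
    WREG32(tmp);
}

int main(void)
{
    uint32_t UPLL_CTLREQ_MASK = 0x00000008; /* placeholder bit position */

    wreg32_p(UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); /* assert CTLREQ */
    printf("after assert:   0x%08x\n", fake_reg);
    wreg32_p(0, ~UPLL_CTLREQ_MASK);                /* deassert CTLREQ */
    printf("after deassert: 0x%08x\n", fake_reg);
    return 0;
}

Note the mask argument is the keep-mask, which is why the driver passes ~UPLL_CTLREQ_MASK to touch only that one field.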
+
+/**
+ * si_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @adev: amdgpu_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (except APUs).
+ * Returns zero on success; -EINVAL on error.
+ */
+static int si_calc_upll_dividers(struct amdgpu_device *adev,
+                                 unsigned vclk, unsigned dclk,
+                                 unsigned vco_min, unsigned vco_max,
+                                 unsigned fb_factor, unsigned fb_mask,
+                                 unsigned pd_min, unsigned pd_max,
+                                 unsigned pd_even,
+                                 unsigned *optimal_fb_div,
+                                 unsigned *optimal_vclk_div,
+                                 unsigned *optimal_dclk_div)
+{
+    unsigned vco_freq, ref_freq = adev->clock.spll.reference_freq;
+
+    /* Start off with something large */
+    unsigned optimal_score = ~0;
+
+    /* Loop through vco from low to high */
+    vco_min = max(max(vco_min, vclk), dclk);
+    for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+        uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+        unsigned vclk_div, dclk_div, score;
+
+        do_div(fb_div, ref_freq);
+
+        /* fb div out of range? */
+        if (fb_div > fb_mask)
+            break; /* It can only get worse */
+
+        fb_div &= fb_mask;
+
+        /* Calc vclk divider with current vco freq */
+        vclk_div = si_uvd_calc_upll_post_div(vco_freq, vclk,
+                                             pd_min, pd_even);
+        if (vclk_div > pd_max)
+            break; /* vco is too big, it has to stop */
+
+        /* Calc dclk divider with current vco freq */
+        dclk_div = si_uvd_calc_upll_post_div(vco_freq, dclk,
+                                             pd_min, pd_even);
+        if (dclk_div > pd_max)
+            break; /* vco is too big, it has to stop */
+
+        /* Calc score with current vco freq */
+        score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+        /* Determine if this vco setting is better than current optimal settings */
+        if (score < optimal_score) {
+            *optimal_fb_div = fb_div;
+            *optimal_vclk_div = vclk_div;
+            *optimal_dclk_div = dclk_div;
+            optimal_score = score;
+            if (optimal_score == 0)
+                break; /* It can't get better than this */
+        }
+    }
+
+    /* Did we find a valid setup? */
+    if (optimal_score == ~0)
+        return -EINVAL;
+
+    return 0;
+}
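Running the post-divider rule from si_uvd_calc_upll_post_div() above on sample numbers shows why the +1 adjustment exists: the first guess from integer division can leave the divided VCO above the target. The frequencies below are arbitrary sample values in whatever unit the caller uses:

#include <stdio.h>

static unsigned calc_post_div(unsigned vco_freq, unsigned target_freq,
                              unsigned pd_min, unsigned pd_even)
{
    unsigned post_div = vco_freq / target_freq;

    if (post_div < pd_min)                       /* respect the hw minimum */
        post_div = pd_min;
    if ((vco_freq / post_div) > target_freq)     /* never overshoot the target */
        post_div += 1;
    if (post_div > pd_even && post_div % 2)      /* large dividers must be even */
        post_div += 1;
    return post_div;
}

int main(void)
{
    unsigned vco = 160000, vclk = 54000; /* sample values */
    unsigned pd = calc_post_div(vco, vclk, 0, 5);

    /* 160000/54000 = 2, but 160000/2 = 80000 > 54000, so pd becomes 3 */
    printf("vco %u / post_div %u = %u (target %u)\n",
           vco, pd, vco / pd, vclk);
    return 0;
}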
+
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+    unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+    int r;
+
+    /* Bypass vclk and dclk with bclk */
+    WREG32_P(CG_UPLL_FUNC_CNTL_2,
+             VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+             ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+    /* Put PLL in bypass mode */
+    WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+    if (!vclk || !dclk) {
+        /* Keep the Bypass mode */
+        return 0;
+    }
+
+    r = si_calc_upll_dividers(adev, vclk, dclk, 125000, 250000,
+                              16384, 0x03FFFFFF, 0, 128, 5,
+                              &fb_div, &vclk_div, &dclk_div);
+    if (r)
+        return r;
+
+    /* Set RESET_ANTI_MUX to 0 */
+    WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+    /* Set VCO_MODE to 1 */
+    WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+    /* Disable sleep mode */
+    WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+    /* Deassert UPLL_RESET */
+    WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+    mdelay(1);
+
+    r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
+    if (r)
+        return r;
+
+    /* Assert UPLL_RESET again */
+    WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+    /* Disable spread spectrum. */
+    WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+    /* Set feedback divider */
+    WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+    /* Set ref divider to 0 */
+    WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+    if (fb_div < 307200)
+        WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+    else
+        WREG32_P(CG_UPLL_FUNC_CNTL_4,
+                 UPLL_SPARE_ISPARE9,
+                 ~UPLL_SPARE_ISPARE9);
+
+    /* Set PDIV_A and PDIV_B */
+    WREG32_P(CG_UPLL_FUNC_CNTL_2,
+             UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+             ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+    /* Give the PLL some time to settle */
+    mdelay(15);
+
+    /* Deassert PLL_RESET */
+    WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+    mdelay(15);
+
+    /* Switch from bypass mode to normal mode */
+    WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+    r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
+    if (r)
+        return r;
+
+    /* Switch VCLK and DCLK selection */
+    WREG32_P(CG_UPLL_FUNC_CNTL_2,
+             VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+             ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+    mdelay(100);
+
+    return 0;
+}
+
+static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
+{
+    unsigned i;
+
+    /* Make sure VCEPLL_CTLREQ is deasserted */
+    WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+    mdelay(10);
+
+    /* Assert UPLL_CTLREQ */
+    WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+    /* Wait for CTLACK and CTLACK2 to get asserted */
+    for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
+        uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+
+        if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+            break;
+        mdelay(10);
+    }
+
+    /* Deassert UPLL_CTLREQ */
+    WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+    if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
+        DRM_ERROR("Timeout setting VCE clocks!\n");
+        return -ETIMEDOUT;
+    }
+
+    return 0;
+}
+
+static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
+	int r;
+
+	/* Bypass evclk and ecclk with bclk */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	/* Put PLL in bypass mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
+		     ~VCEPLL_BYPASS_EN_MASK);
+
+	if (!evclk || !ecclk) {
+		/* Keep the Bypass mode, put PLL to sleep */
+		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+			     ~VCEPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = si_calc_upll_dividers(adev, evclk, ecclk, 125000, 250000,
+				  16384, 0x03FFFFFF, 0, 128, 5,
+				  &fb_div, &evclk_div, &ecclk_div);
+	if (r)
+		return r;
+
+	/* Set RESET_ANTI_MUX to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* Set VCO_MODE to 1 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
+		     ~VCEPLL_VCO_MODE_MASK);
+
+	/* Toggle VCEPLL_SLEEP to 1 then back to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+		     ~VCEPLL_SLEEP_MASK);
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
+
+	/* Deassert VCEPLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Assert VCEPLL_RESET again */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
+
+	/* Disable spread spectrum. */
+	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* Set feedback divider */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3,
+		     VCEPLL_FB_DIV(fb_div),
+		     ~VCEPLL_FB_DIV_MASK);
+
+	/* Set ref divider to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
+
+	/* Set PDIV_A and PDIV_B */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
+		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
+
+	/* Give the PLL some time to settle */
+	mdelay(15);
+
+	/* Deassert PLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* Switch from bypass mode to normal mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Switch EVCLK and ECCLK selection */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
+static void si_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1415,7 +2001,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.set_vga_state = &si_vga_set_state,
 	.get_xclk = &si_get_xclk,
 	.set_uvd_clocks = &si_set_uvd_clocks,
-	.set_vce_clocks = NULL,
+	.set_vce_clocks = &si_set_vce_clocks,
 	.get_pcie_lanes = &si_get_pcie_lanes,
 	.set_pcie_lanes = &si_set_pcie_lanes,
 	.get_config_memsize = &si_get_config_memsize,
@@ -1425,6 +2011,9 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.get_pcie_usage = &si_get_pcie_usage,
 	.need_reset_on_init = &si_need_reset_on_init,
 	.get_pcie_replay_count = &si_get_pcie_replay_count,
+	.supports_baco = &si_asic_supports_baco,
+	.pre_asic_init = &si_pre_asic_init,
+	.query_video_codecs = &si_query_video_codecs,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1443,8 +2032,8 @@ static int si_common_early_init(void *handle)
 	adev->pcie_wreg = &si_pcie_wreg;
 	adev->pciep_rreg = &si_pciep_rreg;
 	adev->pciep_wreg = &si_pciep_wreg;
-	adev->uvd_ctx_rreg = NULL;
-	adev->uvd_ctx_wreg = NULL;
+	adev->uvd_ctx_rreg = si_uvd_ctx_rreg;
+	adev->uvd_ctx_wreg = si_uvd_ctx_wreg;
 	adev->didt_rreg = NULL;
 	adev->didt_wreg = NULL;
 
@@ -2159,8 +2748,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-	si_detect_hw_virtualization(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_VERDE:
 	case CHIP_TAHITI:
@@ -2172,10 +2759,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
-		/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
 		break;
 	case CHIP_OLAND:
@@ -2186,11 +2777,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); +#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#endif else amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); - - /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */ + amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; case CHIP_HAINAN: @@ -2201,7 +2795,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_dma_ip_block); amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); break; default: BUG(); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index bdda8b4e03f0..195b45bcb8ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -81,7 +81,9 @@ static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, * si_dma_ring_emit_fence - emit a fence on the DMA ring * * @ring: amdgpu ring pointer - * @fence: amdgpu fence object + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Add a DMA fence packet to the ring to write * the fence seq number and DMA trap packet to generate @@ -124,7 +126,6 @@ static void si_dma_stop(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_buffer_funcs_status(adev, false); - ring->sched.ready = false; } } @@ -245,6 +246,7 @@ error_free_wb: * si_dma_ring_test_ib - test an IB on the DMA engine * * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. @@ -267,7 +269,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) tmp = 0xCAFEDEAD; adev->wb.wb[index] = cpu_to_le32(tmp); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); if (r) goto err0; @@ -302,7 +305,7 @@ err0: } /** - * cik_dma_vm_copy_pte - update PTEs by copying them from the GART + * si_dma_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -399,8 +402,9 @@ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * si_dma_pad_ib - pad the IB to the required number of dw + * si_dma_ring_pad_ib - pad the IB to the required number of dw * + * @ring: amdgpu_ring pointer * @ib: indirect buffer to fill with padding * */ @@ -411,7 +415,7 @@ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) } /** - * cik_sdma_ring_emit_pipeline_sync - sync the pipeline + * si_dma_ring_emit_pipeline_sync - sync the pipeline * * @ring: amdgpu_ring pointer * @@ -436,7 +440,8 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) * si_dma_ring_emit_vm_flush - cik vm flush using sDMA * * @ring: amdgpu_ring pointer - * @vm: amdgpu_vm pointer + * @vmid: vmid number to use + * @pd_addr: address * * Update the page table base and flush the VM TLB * using sDMA (VI). @@ -502,9 +507,9 @@ static int si_dma_sw_init(void *handle) sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - (i == 0) ? 
- AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -648,7 +653,7 @@ static int si_dma_set_clockgating_state(void *handle, bool enable; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - enable = (state == AMD_CG_STATE_GATE) ? true : false; + enable = (state == AMD_CG_STATE_GATE); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { for (i = 0; i < adev->sdma.num_instances; i++) { @@ -763,10 +768,11 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev) /** * si_dma_emit_copy_buffer - copy buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_offset: src GPU address * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer + * @tmz: is this a secure operation * * Copy GPU buffers using the DMA engine (VI). * Used by the amdgpu ttm implementation to move pages if @@ -775,7 +781,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev) static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, uint64_t src_offset, uint64_t dst_offset, - uint32_t byte_count) + uint32_t byte_count, + bool tmz) { ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, byte_count); @@ -788,7 +795,7 @@ static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, /** * si_dma_emit_fill_buffer - fill buffer using the sDMA engine * - * @ring: amdgpu_ring structure holding ring information + * @ib: indirect buffer to copy to * @src_data: value to write to buffer * @dst_offset: dst GPU address * @byte_count: number of bytes to xfer @@ -834,16 +841,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) { - struct drm_gpu_scheduler *sched; unsigned i; adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + adev->vm_manager.vm_pte_scheds[i] = + &adev->sdma.instance[i].ring.sched; } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; } const struct amdgpu_ip_block_version si_dma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c deleted file mode 100644 index 4cb4c891120b..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ /dev/null @@ -1,8081 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/module.h> -#include <linux/pci.h> - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_dpm.h" -#include "amdgpu_atombios.h" -#include "amd_pcie.h" -#include "sid.h" -#include "r600_dpm.h" -#include "si_dpm.h" -#include "atom.h" -#include "../include/pptable.h" -#include <linux/math64.h> -#include <linux/seq_file.h> -#include <linux/firmware.h> - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define SMC_RAM_END 0x20000 - -#define SCLK_MIN_DEEPSLEEP_FREQ 1350 - - -/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 - -#define BIOS_SCRATCH_4 0x5cd - -MODULE_FIRMWARE("amdgpu/tahiti_smc.bin"); -MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin"); -MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/verde_smc.bin"); -MODULE_FIRMWARE("amdgpu/verde_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/oland_smc.bin"); -MODULE_FIRMWARE("amdgpu/oland_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/hainan_smc.bin"); -MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin"); - -static const struct amd_pm_funcs si_dpm_funcs; - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union fan_info { - struct _ATOM_PPLIB_FANTABLE fan; - struct _ATOM_PPLIB_FANTABLE2 fan2; - struct _ATOM_PPLIB_FANTABLE3 fan3; -}; - -union pplib_clock_info { - struct _ATOM_PPLIB_R600_CLOCK_INFO r600; - struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; - struct _ATOM_PPLIB_SI_CLOCK_INFO si; -}; - -static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = -{ - R600_UTC_DFLT_00, - R600_UTC_DFLT_01, - R600_UTC_DFLT_02, - R600_UTC_DFLT_03, - R600_UTC_DFLT_04, - R600_UTC_DFLT_05, - R600_UTC_DFLT_06, - R600_UTC_DFLT_07, - R600_UTC_DFLT_08, - R600_UTC_DFLT_09, - R600_UTC_DFLT_10, - R600_UTC_DFLT_11, - R600_UTC_DFLT_12, - R600_UTC_DFLT_13, - R600_UTC_DFLT_14, -}; - -static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = -{ - R600_DTC_DFLT_00, - R600_DTC_DFLT_01, - R600_DTC_DFLT_02, - R600_DTC_DFLT_03, - R600_DTC_DFLT_04, - R600_DTC_DFLT_05, - R600_DTC_DFLT_06, - R600_DTC_DFLT_07, - R600_DTC_DFLT_08, - R600_DTC_DFLT_09, - R600_DTC_DFLT_10, - R600_DTC_DFLT_11, - R600_DTC_DFLT_12, - R600_DTC_DFLT_13, - R600_DTC_DFLT_14, -}; - -static const struct si_cac_config_reg cac_weights_tahiti[] = -{ - { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_tahiti[] = -{ - { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND 
}, - { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 
0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } - -}; - -static const struct si_cac_config_reg cac_override_tahiti[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_tahiti = -{ - ((1 << 16) | 27027), - 6, - 0, - 4, - 95, - { - 0UL, - 0UL, - 4521550UL, - 309631529UL, - -1270850L, - 4513710L, - 40 - }, - 595000000UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_tahiti = -{ - { 1159409, 0, 0, 0, 0 }, - { 777, 0, 0, 0, 0 }, - 2, - 54000, - 127000, - 25, - 2, - 10, - 13, - { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, - { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, - { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, - 85, - false -}; - -#if 0 -static const struct si_dte_data dte_data_tahiti_le = -{ - { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 }, - { 0x7D, 0x7D, 0x4E4, 0xB00, 0 }, - 0x5, - 0xAFC8, - 0x64, - 0x32, - 1, - 0, - 0x10, - { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 }, - { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 }, - { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 }, - 85, - true -}; -#endif - -static const struct si_dte_data dte_data_tahiti_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_new_zealand = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, - { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, - 0x5, - 0xAFC8, - 0x69, - 0x32, - 1, - 0, - 0x10, - { 
0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, - 85, - true -}; - -static const struct si_dte_data dte_data_aruba_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_malta = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_pitcairn[] = -{ - { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_pitcairn[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x0001fffe, 1, 0x5, 
SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_pitcairn[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_pitcairn = -{ - ((1 << 16) | 27027), - 5, - 0, - 6, - 100, - { - 51600000UL, - 1800000UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; 
- -static const struct si_dte_data dte_data_pitcairn = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_curacao_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_curacao_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_neptune_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_chelsea_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 
0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_chelsea_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, 
SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_heathrow[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - 
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 
0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_cape_verde[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, 
SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_cape_verde[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 
0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_cape_verde[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_cape_verde = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_cape_verde = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_venus_xtx = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_venus_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_venus_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_oland[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_mars_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_mars_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_oland_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_oland_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_oland[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_mars_pro[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 
0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_oland[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_oland = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_powertune_data powertune_data_mars_pro = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_oland = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_mars_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 55000, - 105, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_sun_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 55000, - 105, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - - -static const struct si_cac_config_reg cac_weights_hainan[] = -{ - { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 
0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_hainan = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 9, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev); -static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev); -static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev); -static struct si_ps *si_get_ps(struct amdgpu_ps *rps); - -static int si_populate_voltage_value(struct amdgpu_device *adev, - const struct atom_voltage_table *table, - u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); -static int si_get_std_voltage_value(struct amdgpu_device *adev, - SISLANDS_SMC_VOLTAGE_VALUE *voltage, - u16 *std_voltage); -static int si_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value); -static int si_convert_power_level_to_smc(struct amdgpu_device *adev, - struct rv7xx_pl *pl, - SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level); -static int si_calculate_sclk_params(struct amdgpu_device *adev, - u32 engine_clock, - SISLANDS_SMC_SCLK_VALUE *sclk); - -static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev); -static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); -static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); - -static struct si_power_info *si_get_pi(struct amdgpu_device *adev) -{ - struct si_power_info *pi = adev->pm.dpm.priv; - return pi; -} - -static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, - u16 v, s32 t, u32 ileakage, u32 *leakage) -{ - s64 kt, kv, leakage_w, i_leakage, vddc; - s64 temperature, t_slope, t_intercept, av, bv, t_ref; - s64 tmp; - - i_leakage = div64_s64(drm_int2fixp(ileakage), 100); - vddc = div64_s64(drm_int2fixp(v), 1000); - temperature = div64_s64(drm_int2fixp(t), 1000); - - t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000); - t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000); - av = div64_s64(drm_int2fixp(coeff->av), 100000000); - bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); - t_ref = drm_int2fixp(coeff->t_ref); - - tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; - kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); - kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); - kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); - - leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); - - *leakage = drm_fixp2int(leakage_w * 1000); -} - -static void 
si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev, - const struct ni_leakage_coeffients *coeff, - u16 v, - s32 t, - u32 i_leakage, - u32 *leakage) -{ - si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage); -} - -static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff, - const u32 fixed_kt, u16 v, - u32 ileakage, u32 *leakage) -{ - s64 kt, kv, leakage_w, i_leakage, vddc; - - i_leakage = div64_s64(drm_int2fixp(ileakage), 100); - vddc = div64_s64(drm_int2fixp(v), 1000); - - kt = div64_s64(drm_int2fixp(fixed_kt), 100000000); - kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000), - drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc))); - - leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); - - *leakage = drm_fixp2int(leakage_w * 1000); -} - -static void si_calculate_leakage_for_v(struct amdgpu_device *adev, - const struct ni_leakage_coeffients *coeff, - const u32 fixed_kt, - u16 v, - u32 i_leakage, - u32 *leakage) -{ - si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage); -} - - -static void si_update_dte_from_pl2(struct amdgpu_device *adev, - struct si_dte_data *dte_data) -{ - u32 p_limit1 = adev->pm.dpm.tdp_limit; - u32 p_limit2 = adev->pm.dpm.near_tdp_limit; - u32 k = dte_data->k; - u32 t_max = dte_data->max_t; - u32 t_split[5] = { 10, 15, 20, 25, 30 }; - u32 t_0 = dte_data->t0; - u32 i; - - if (p_limit2 != 0 && p_limit2 <= p_limit1) { - dte_data->tdep_count = 3; - - for (i = 0; i < k; i++) { - dte_data->r[i] = - (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) / - (p_limit2 * (u32)100); - } - - dte_data->tdep_r[1] = dte_data->r[4] * 2; - - for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) { - dte_data->tdep_r[i] = dte_data->r[4]; - } - } else { - DRM_ERROR("Invalid PL2! 
DTE will not be updated.\n"); - } -} - -static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) -{ - struct ni_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static struct si_ps *si_get_ps(struct amdgpu_ps *aps) -{ - struct si_ps *ps = aps->ps_priv; - - return ps; -} - -static void si_initialize_powertune_defaults(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - bool update_dte_from_pl2 = false; - - if (adev->asic_type == CHIP_TAHITI) { - si_pi->cac_weights = cac_weights_tahiti; - si_pi->lcac_config = lcac_tahiti; - si_pi->cac_override = cac_override_tahiti; - si_pi->powertune_data = &powertune_data_tahiti; - si_pi->dte_data = dte_data_tahiti; - - switch (adev->pdev->device) { - case 0x6798: - si_pi->dte_data.enable_dte_by_default = true; - break; - case 0x6799: - si_pi->dte_data = dte_data_new_zealand; - break; - case 0x6790: - case 0x6791: - case 0x6792: - case 0x679E: - si_pi->dte_data = dte_data_aruba_pro; - update_dte_from_pl2 = true; - break; - case 0x679B: - si_pi->dte_data = dte_data_malta; - update_dte_from_pl2 = true; - break; - case 0x679A: - si_pi->dte_data = dte_data_tahiti_pro; - update_dte_from_pl2 = true; - break; - default: - if (si_pi->dte_data.enable_dte_by_default == true) - DRM_ERROR("DTE is not enabled!\n"); - break; - } - } else if (adev->asic_type == CHIP_PITCAIRN) { - si_pi->cac_weights = cac_weights_pitcairn; - si_pi->lcac_config = lcac_pitcairn; - si_pi->cac_override = cac_override_pitcairn; - si_pi->powertune_data = &powertune_data_pitcairn; - - switch (adev->pdev->device) { - case 0x6810: - case 0x6818: - si_pi->dte_data = dte_data_curacao_xt; - update_dte_from_pl2 = true; - break; - case 0x6819: - case 0x6811: - si_pi->dte_data = dte_data_curacao_pro; - update_dte_from_pl2 = true; - break; - case 0x6800: - case 0x6806: - si_pi->dte_data = dte_data_neptune_xt; - update_dte_from_pl2 = true; - break; - default: - si_pi->dte_data = dte_data_pitcairn; - break; - } - } else if (adev->asic_type == CHIP_VERDE) { - si_pi->lcac_config = lcac_cape_verde; - si_pi->cac_override = cac_override_cape_verde; - si_pi->powertune_data = &powertune_data_cape_verde; - - switch (adev->pdev->device) { - case 0x683B: - case 0x683F: - case 0x6829: - case 0x6835: - si_pi->cac_weights = cac_weights_cape_verde_pro; - si_pi->dte_data = dte_data_cape_verde; - break; - case 0x682C: - si_pi->cac_weights = cac_weights_cape_verde_pro; - si_pi->dte_data = dte_data_sun_xt; - update_dte_from_pl2 = true; - break; - case 0x6825: - case 0x6827: - si_pi->cac_weights = cac_weights_heathrow; - si_pi->dte_data = dte_data_cape_verde; - break; - case 0x6824: - case 0x682D: - si_pi->cac_weights = cac_weights_chelsea_xt; - si_pi->dte_data = dte_data_cape_verde; - break; - case 0x682F: - si_pi->cac_weights = cac_weights_chelsea_pro; - si_pi->dte_data = dte_data_cape_verde; - break; - case 0x6820: - si_pi->cac_weights = cac_weights_heathrow; - si_pi->dte_data = dte_data_venus_xtx; - break; - case 0x6821: - si_pi->cac_weights = cac_weights_heathrow; - si_pi->dte_data = dte_data_venus_xt; - break; - case 0x6823: - case 0x682B: - case 0x6822: - case 0x682A: - si_pi->cac_weights = cac_weights_chelsea_pro; - si_pi->dte_data = dte_data_venus_pro; - break; - default: - si_pi->cac_weights = cac_weights_cape_verde; - si_pi->dte_data = dte_data_cape_verde; - break; - } - } 
else if (adev->asic_type == CHIP_OLAND) { - si_pi->lcac_config = lcac_mars_pro; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_mars_pro; - si_pi->dte_data = dte_data_mars_pro; - - switch (adev->pdev->device) { - case 0x6601: - case 0x6621: - case 0x6603: - case 0x6605: - si_pi->cac_weights = cac_weights_mars_pro; - update_dte_from_pl2 = true; - break; - case 0x6600: - case 0x6606: - case 0x6620: - case 0x6604: - si_pi->cac_weights = cac_weights_mars_xt; - update_dte_from_pl2 = true; - break; - case 0x6611: - case 0x6613: - case 0x6608: - si_pi->cac_weights = cac_weights_oland_pro; - update_dte_from_pl2 = true; - break; - case 0x6610: - si_pi->cac_weights = cac_weights_oland_xt; - update_dte_from_pl2 = true; - break; - default: - si_pi->cac_weights = cac_weights_oland; - si_pi->lcac_config = lcac_oland; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_oland; - si_pi->dte_data = dte_data_oland; - break; - } - } else if (adev->asic_type == CHIP_HAINAN) { - si_pi->cac_weights = cac_weights_hainan; - si_pi->lcac_config = lcac_oland; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_hainan; - si_pi->dte_data = dte_data_sun_xt; - update_dte_from_pl2 = true; - } else { - DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n"); - return; - } - - ni_pi->enable_power_containment = false; - ni_pi->enable_cac = false; - ni_pi->enable_sq_ramping = false; - si_pi->enable_dte = false; - - if (si_pi->powertune_data->enable_powertune_by_default) { - ni_pi->enable_power_containment = true; - ni_pi->enable_cac = true; - if (si_pi->dte_data.enable_dte_by_default) { - si_pi->enable_dte = true; - if (update_dte_from_pl2) - si_update_dte_from_pl2(adev, &si_pi->dte_data); - - } - ni_pi->enable_sq_ramping = true; - } - - ni_pi->driver_calculate_cac_leakage = true; - ni_pi->cac_configuration_required = true; - - if (ni_pi->cac_configuration_required) { - ni_pi->support_cac_long_term_average = true; - si_pi->dyn_powertune_data.l2_lta_window_size = - si_pi->powertune_data->l2_lta_window_size_default; - si_pi->dyn_powertune_data.lts_truncate = - si_pi->powertune_data->lts_truncate_default; - } else { - ni_pi->support_cac_long_term_average = false; - si_pi->dyn_powertune_data.l2_lta_window_size = 0; - si_pi->dyn_powertune_data.lts_truncate = 0; - } - - si_pi->dyn_powertune_data.disable_uvd_powertune = false; -} - -static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev) -{ - return 1; -} - -static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) -{ - u32 xclk; - u32 wintime; - u32 cac_window; - u32 cac_window_size; - - xclk = amdgpu_asic_get_xclk(adev); - - if (xclk == 0) - return 0; - - cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; - cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); - - wintime = (cac_window_size * 100) / xclk; - - return wintime; -} - -static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor) -{ - return power_in_watts; -} - -static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev, - bool adjust_polarity, - u32 tdp_adjustment, - u32 *tdp_limit, - u32 *near_tdp_limit) -{ - u32 adjustment_delta, max_tdp_limit; - - if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit) - return -EINVAL; - - max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100; - - if (adjust_polarity) { - *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; - *near_tdp_limit = 
adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit); - } else { - *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; - adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit; - if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted) - *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta; - else - *near_tdp_limit = 0; - } - - if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit)) - return -EINVAL; - if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit)) - return -EINVAL; - - return 0; -} - -static int si_populate_smc_tdp_limits(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (ni_pi->enable_power_containment) { - SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; - PP_SIslands_PAPMParameters *papm_parm; - struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); - u32 tdp_limit; - u32 near_tdp_limit; - int ret; - - if (scaling_factor == 0) - return -EINVAL; - - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); - - ret = si_calculate_adjusted_tdp_limits(adev, - false, /* ??? */ - adev->pm.dpm.tdp_adjustment, - &tdp_limit, - &near_tdp_limit); - if (ret) - return ret; - - smc_table->dpm2Params.TDPLimit = - cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); - smc_table->dpm2Params.NearTDPLimit = - cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000); - smc_table->dpm2Params.SafePowerLimit = - cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); - - ret = amdgpu_si_copy_bytes_to_smc(adev, - (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) + - offsetof(PP_SIslands_DPM2Parameters, TDPLimit)), - (u8 *)(&(smc_table->dpm2Params.TDPLimit)), - sizeof(u32) * 3, - si_pi->sram_end); - if (ret) - return ret; - - if (si_pi->enable_ppm) { - papm_parm = &si_pi->papm_parm; - memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters)); - papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp); - papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max); - papm_parm->dGPU_T_Warning = cpu_to_be32(95); - papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5); - papm_parm->PlatformPowerLimit = 0xffffffff; - papm_parm->NearTDPLimitPAPM = 0xffffffff; - - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start, - (u8 *)papm_parm, - sizeof(PP_SIslands_PAPMParameters), - si_pi->sram_end); - if (ret) - return ret; - } - } - return 0; -} - -static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (ni_pi->enable_power_containment) { - SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); - int ret; - - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); - - smc_table->dpm2Params.NearTDPLimit = - cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); - smc_table->dpm2Params.SafePowerLimit = - cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); - - ret = amdgpu_si_copy_bytes_to_smc(adev, - (si_pi->state_table_start + - offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) 
+ - offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)), - (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)), - sizeof(u32) * 2, - si_pi->sram_end); - if (ret) - return ret; - } - - return 0; -} - -static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev, - const u16 prev_std_vddc, - const u16 curr_std_vddc) -{ - u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN; - u64 prev_vddc = (u64)prev_std_vddc; - u64 curr_vddc = (u64)curr_std_vddc; - u64 pwr_efficiency_ratio, n, d; - - if ((prev_vddc == 0) || (curr_vddc == 0)) - return 0; - - n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); - d = prev_vddc * prev_vddc; - pwr_efficiency_ratio = div64_u64(n, d); - - if (pwr_efficiency_ratio > (u64)0xFFFF) - return 0; - - return (u16)pwr_efficiency_ratio; -} - -static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - if (si_pi->dyn_powertune_data.disable_uvd_powertune && - amdgpu_state->vclk && amdgpu_state->dclk) - return true; - - return false; -} - -struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev) -{ - struct evergreen_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static int si_populate_power_containment_values(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - SISLANDS_SMC_VOLTAGE_VALUE vddc; - u32 prev_sclk; - u32 max_sclk; - u32 min_sclk; - u16 prev_std_vddc; - u16 curr_std_vddc; - int i; - u16 pwr_efficiency_ratio; - u8 max_ps_percent; - bool disable_uvd_power_tune; - int ret; - - if (ni_pi->enable_power_containment == false) - return 0; - - if (state->performance_level_count == 0) - return -EINVAL; - - if (smc_state->levelCount != state->performance_level_count) - return -EINVAL; - - disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state); - - smc_state->levels[0].dpm2.MaxPS = 0; - smc_state->levels[0].dpm2.NearTDPDec = 0; - smc_state->levels[0].dpm2.AboveSafeInc = 0; - smc_state->levels[0].dpm2.BelowSafeInc = 0; - smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0; - - for (i = 1; i < state->performance_level_count; i++) { - prev_sclk = state->performance_levels[i-1].sclk; - max_sclk = state->performance_levels[i].sclk; - if (i == 1) - max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M; - else - max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H; - - if (prev_sclk > max_sclk) - return -EINVAL; - - if ((max_ps_percent == 0) || - (prev_sclk == max_sclk) || - disable_uvd_power_tune) - min_sclk = max_sclk; - else if (i == 1) - min_sclk = prev_sclk; - else - min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; - - if (min_sclk < state->performance_levels[0].sclk) - min_sclk = state->performance_levels[0].sclk; - - if (min_sclk == 0) - return -EINVAL; - - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - state->performance_levels[i-1].vddc, &vddc); - if (ret) - return ret; - - ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc); - if (ret) - return ret; - - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - state->performance_levels[i].vddc, &vddc); - if (ret) - return ret; - - ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc); - if (ret) - return ret; - - pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev, - prev_std_vddc, 
curr_std_vddc); - - smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk); - smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC; - smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC; - smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC; - smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio); - } - - return 0; -} - -static int si_populate_sq_ramping_values(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - u32 sq_power_throttle, sq_power_throttle2; - bool enable_sq_ramping = ni_pi->enable_sq_ramping; - int i; - - if (state->performance_level_count == 0) - return -EINVAL; - - if (smc_state->levelCount != state->performance_level_count) - return -EINVAL; - - if (adev->pm.dpm.sq_ramping_threshold == 0) - return -EINVAL; - - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) - enable_sq_ramping = false; - - for (i = 0; i < state->performance_level_count; i++) { - sq_power_throttle = 0; - sq_power_throttle2 = 0; - - if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && - enable_sq_ramping) { - sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); - sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); - sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); - sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); - sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); - } else { - sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; - sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - } - - smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); - smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); - } - - return 0; -} - -static int si_enable_power_containment(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - bool enable) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (ni_pi->enable_power_containment) { - if (enable) { - if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - ni_pi->pc_enabled = false; - } else { - ni_pi->pc_enabled = true; - } - } - } else { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - ni_pi->pc_enabled = false; - } - } - - return ret; -} - -static int si_initialize_smc_dte_tables(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - int ret = 0; - struct si_dte_data *dte_data = &si_pi->dte_data; - Smc_SIslands_DTE_Configuration *dte_tables = NULL; - u32 table_size; - u8 tdep_count; - u32 i; - - if (dte_data == NULL) - si_pi->enable_dte = false; - - if 
(si_pi->enable_dte == false) - return 0; - - if (dte_data->k <= 0) - return -EINVAL; - - dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL); - if (dte_tables == NULL) { - si_pi->enable_dte = false; - return -ENOMEM; - } - - table_size = dte_data->k; - - if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES) - table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES; - - tdep_count = dte_data->tdep_count; - if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE) - tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; - - dte_tables->K = cpu_to_be32(table_size); - dte_tables->T0 = cpu_to_be32(dte_data->t0); - dte_tables->MaxT = cpu_to_be32(dte_data->max_t); - dte_tables->WindowSize = dte_data->window_size; - dte_tables->temp_select = dte_data->temp_select; - dte_tables->DTE_mode = dte_data->dte_mode; - dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold); - - if (tdep_count > 0) - table_size--; - - for (i = 0; i < table_size; i++) { - dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]); - dte_tables->R[i] = cpu_to_be32(dte_data->r[i]); - } - - dte_tables->Tdep_count = tdep_count; - - for (i = 0; i < (u32)tdep_count; i++) { - dte_tables->T_limits[i] = dte_data->t_limits[i]; - dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]); - dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]); - } - - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start, - (u8 *)dte_tables, - sizeof(Smc_SIslands_DTE_Configuration), - si_pi->sram_end); - kfree(dte_tables); - - return ret; -} - -static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, - u16 *max, u16 *min) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct amdgpu_cac_leakage_table *table = - &adev->pm.dpm.dyn_state.cac_leakage_table; - u32 i; - u32 v0_loadline; - - if (table == NULL) - return -EINVAL; - - *max = 0; - *min = 0xFFFF; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].vddc > *max) - *max = table->entries[i].vddc; - if (table->entries[i].vddc < *min) - *min = table->entries[i].vddc; - } - - if (si_pi->powertune_data->lkge_lut_v0_percent > 100) - return -EINVAL; - - v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100; - - if (v0_loadline > 0xFFFFUL) - return -EINVAL; - - *min = (u16)v0_loadline; - - if ((*min > *max) || (*max == 0) || (*min == 0)) - return -EINVAL; - - return 0; -} - -static u16 si_get_cac_std_voltage_step(u16 max, u16 min) -{ - return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) / - SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; -} - -static int si_init_dte_leakage_table(struct amdgpu_device *adev, - PP_SIslands_CacConfig *cac_tables, - u16 vddc_max, u16 vddc_min, u16 vddc_step, - u16 t0, u16 t_step) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 leakage; - unsigned int i, j; - s32 t; - u32 smc_leakage; - u32 scaling_factor; - u16 voltage; - - scaling_factor = si_get_smc_power_scaling_factor(adev); - - for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) { - t = (1000 * (i * t_step + t0)); - - for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { - voltage = vddc_max - (vddc_step * j); - - si_calculate_leakage_for_v_and_t(adev, - &si_pi->powertune_data->leakage_coefficients, - voltage, - t, - si_pi->dyn_powertune_data.cac_leakage, - &leakage); - - smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; - - if (smc_leakage > 0xFFFF) - smc_leakage = 0xFFFF; - - cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] 
= - cpu_to_be16((u16)smc_leakage); - } - } - return 0; -} - -static int si_init_simplified_leakage_table(struct amdgpu_device *adev, - PP_SIslands_CacConfig *cac_tables, - u16 vddc_max, u16 vddc_min, u16 vddc_step) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 leakage; - unsigned int i, j; - u32 smc_leakage; - u32 scaling_factor; - u16 voltage; - - scaling_factor = si_get_smc_power_scaling_factor(adev); - - for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { - voltage = vddc_max - (vddc_step * j); - - si_calculate_leakage_for_v(adev, - &si_pi->powertune_data->leakage_coefficients, - si_pi->powertune_data->fixed_kt, - voltage, - si_pi->dyn_powertune_data.cac_leakage, - &leakage); - - smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; - - if (smc_leakage > 0xFFFF) - smc_leakage = 0xFFFF; - - for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) - cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = - cpu_to_be16((u16)smc_leakage); - } - return 0; -} - -static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - PP_SIslands_CacConfig *cac_tables = NULL; - u16 vddc_max, vddc_min, vddc_step; - u16 t0, t_step; - u32 load_line_slope, reg; - int ret = 0; - u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100; - - if (ni_pi->enable_cac == false) - return 0; - - cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL); - if (!cac_tables) - return -ENOMEM; - - reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; - reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); - WREG32(CG_CAC_CTRL, reg); - - si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; - si_pi->dyn_powertune_data.dc_pwr_value = - si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0]; - si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev); - si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default; - - si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000; - - ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min); - if (ret) - goto done_free; - - vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min); - vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)); - t_step = 4; - t0 = 60; - - if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage) - ret = si_init_dte_leakage_table(adev, cac_tables, - vddc_max, vddc_min, vddc_step, - t0, t_step); - else - ret = si_init_simplified_leakage_table(adev, cac_tables, - vddc_max, vddc_min, vddc_step); - if (ret) - goto done_free; - - load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100; - - cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size); - cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate; - cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n; - cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min); - cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step); - cac_tables->R_LL = cpu_to_be32(load_line_slope); - cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime); - cac_tables->calculation_repeats = cpu_to_be32(2); - cac_tables->dc_cac = cpu_to_be32(0); - cac_tables->log2_PG_LKG_SCALE = 12; - cac_tables->cac_temp = si_pi->powertune_data->operating_temp; - cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0); - cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step); - - ret = amdgpu_si_copy_bytes_to_smc(adev, 
si_pi->cac_table_start, - (u8 *)cac_tables, - sizeof(PP_SIslands_CacConfig), - si_pi->sram_end); - - if (ret) - goto done_free; - - ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us); - -done_free: - if (ret) { - ni_pi->enable_cac = false; - ni_pi->enable_power_containment = false; - } - - kfree(cac_tables); - - return ret; -} - -static int si_program_cac_config_registers(struct amdgpu_device *adev, - const struct si_cac_config_reg *cac_config_regs) -{ - const struct si_cac_config_reg *config_regs = cac_config_regs; - u32 data = 0, offset; - - if (!config_regs) - return -EINVAL; - - while (config_regs->offset != 0xFFFFFFFF) { - switch (config_regs->type) { - case SISLANDS_CACCONFIG_CGIND: - offset = SMC_CG_IND_START + config_regs->offset; - if (offset < SMC_CG_IND_END) - data = RREG32_SMC(offset); - break; - default: - data = RREG32(config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - - switch (config_regs->type) { - case SISLANDS_CACCONFIG_CGIND: - offset = SMC_CG_IND_START + config_regs->offset; - if (offset < SMC_CG_IND_END) - WREG32_SMC(offset, data); - break; - default: - WREG32(config_regs->offset, data); - break; - } - config_regs++; - } - return 0; -} - -static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - int ret; - - if ((ni_pi->enable_cac == false) || - (ni_pi->cac_configuration_required == false)) - return 0; - - ret = si_program_cac_config_registers(adev, si_pi->lcac_config); - if (ret) - return ret; - ret = si_program_cac_config_registers(adev, si_pi->cac_override); - if (ret) - return ret; - ret = si_program_cac_config_registers(adev, si_pi->cac_weights); - if (ret) - return ret; - - return 0; -} - -static int si_enable_smc_cac(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - bool enable) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (ni_pi->enable_cac) { - if (enable) { - if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { - if (ni_pi->support_cac_long_term_average) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable); - if (smc_result != PPSMC_Result_OK) - ni_pi->support_cac_long_term_average = false; - } - - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - ni_pi->cac_enabled = false; - } else { - ni_pi->cac_enabled = true; - } - - if (si_pi->enable_dte) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - } - } - } else if (ni_pi->cac_enabled) { - if (si_pi->enable_dte) - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); - - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); - - ni_pi->cac_enabled = false; - - if (ni_pi->support_cac_long_term_average) - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable); - } - } - return ret; -} - -static int si_init_smc_spll_table(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - SMC_SISLANDS_SPLL_DIV_TABLE *spll_table; - SISLANDS_SMC_SCLK_VALUE sclk_params; - u32 fb_div, p_div; - u32 clk_s, clk_v; - u32 sclk = 0; - int ret = 0; - u32 tmp; - int i; - 
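/*
 * The loop below builds the SMC's engine-PLL lookup table: 256 entries,
 * advancing sclk by 512 per entry (sclk here is in the driver's usual
 * 10 kHz units, so each step is roughly 5.12 MHz). For every candidate
 * clock, si_calculate_sclk_params() yields the SPLL register values, from
 * which the post divider (p_div), feedback divider (fb_div) and the
 * spread-spectrum step/size (clk_s/clk_v) are extracted and re-packed.
 * A rough sketch of one entry, assuming the SMC_SISLANDS_SPLL_DIV_TABLE_*
 * shifts and masks used below:
 *
 *	freq = ((fb_div << FBDIV_SHIFT) & FBDIV_MASK) |
 *	       ((p_div  << PDIV_SHIFT)  & PDIV_MASK);
 *	ss   = ((clk_v << CLKV_SHIFT) & CLKV_MASK) |
 *	       ((clk_s << CLKS_SHIFT) & CLKS_MASK);
 *
 * Each packed word is stored with cpu_to_be32() because the SMC expects
 * big-endian data; a divider that overflows its field aborts the build.
 */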
- if (si_pi->spll_table_start == 0) - return -EINVAL; - - spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL); - if (spll_table == NULL) - return -ENOMEM; - - for (i = 0; i < 256; i++) { - ret = si_calculate_sclk_params(adev, sclk, &sclk_params); - if (ret) - break; - p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; - fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; - clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; - clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; - - fb_div &= ~0x00001FFF; - fb_div >>= 1; - clk_v >>= 6; - - if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT)) - ret = -EINVAL; - if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) - ret = -EINVAL; - if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) - ret = -EINVAL; - if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) - ret = -EINVAL; - - if (ret) - break; - - tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) | - ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK); - spll_table->freq[i] = cpu_to_be32(tmp); - - tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) | - ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK); - spll_table->ss[i] = cpu_to_be32(tmp); - - sclk += 512; - } - - - if (!ret) - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start, - (u8 *)spll_table, - sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), - si_pi->sram_end); - - if (ret) - ni_pi->enable_power_containment = false; - - kfree(spll_table); - - return ret; -} - -static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev, - u16 vce_voltage) -{ - u16 highest_leakage = 0; - struct si_power_info *si_pi = si_get_pi(adev); - int i; - - for (i = 0; i < si_pi->leakage_voltage.count; i++){ - if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage) - highest_leakage = si_pi->leakage_voltage.entries[i].voltage; - } - - if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage)) - return highest_leakage; - - return vce_voltage; -} - -static int si_get_vce_clock_voltage(struct amdgpu_device *adev, - u32 evclk, u32 ecclk, u16 *voltage) -{ - u32 i; - int ret = -EINVAL; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - if (((evclk == 0) && (ecclk == 0)) || - (table && (table->count == 0))) { - *voltage = 0; - return 0; - } - - for (i = 0; i < table->count; i++) { - if ((evclk <= table->entries[i].evclk) && - (ecclk <= table->entries[i].ecclk)) { - *voltage = table->entries[i].v; - ret = 0; - break; - } - } - - /* if no match return the highest voltage */ - if (ret) - *voltage = table->entries[table->count - 1].v; - - *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage); - - return ret; -} - -static bool si_dpm_vblank_too_short(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we never hit the non-gddr5 limit so disable it */ - u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 0; - - if (vblank_time < switch_limit) - return true; - else - return false; - -} - -static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, - u32 arb_freq_src, u32 arb_freq_dest) -{ - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 burst_time; - u32 mc_cg_config; - - switch (arb_freq_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; - break; - case MC_CG_ARB_FREQ_F2: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; - break; - case MC_CG_ARB_FREQ_F3: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; - break; - default: - return -EINVAL; - } - - switch (arb_freq_dest) { - case MC_CG_ARB_FREQ_F0: - WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); - break; - case MC_CG_ARB_FREQ_F1: - WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); - break; - case MC_CG_ARB_FREQ_F2: - WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); - break; - case MC_CG_ARB_FREQ_F3: - WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); - break; - default: - return -EINVAL; - } - - mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; - WREG32(MC_CG_CONFIG, mc_cg_config); - WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); - - return 0; -} - -static void ni_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct si_ps *new_ps = si_get_ps(rps); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - - eg_pi->current_rps = *rps; - ni_pi->current_ps = *new_ps; - eg_pi->current_rps.ps_priv = &ni_pi->current_ps; - adev->pm.dpm.current_ps = &eg_pi->current_rps; -} - -static void ni_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct si_ps *new_ps = si_get_ps(rps); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - - eg_pi->requested_rps = *rps; - ni_pi->requested_ps = *new_ps; - eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; - adev->pm.dpm.requested_ps = &eg_pi->requested_rps; -} - -static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, - struct amdgpu_ps *new_ps, - struct amdgpu_ps *old_ps) -{ - struct si_ps *new_state = si_get_ps(new_ps); - struct si_ps *current_state = si_get_ps(old_ps); - - if ((new_ps->vclk == old_ps->vclk) && - (new_ps->dclk == old_ps->dclk)) - return; - - if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >= - 
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
-		return;
-
-	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
-						 struct amdgpu_ps *new_ps,
-						 struct amdgpu_ps *old_ps)
-{
-	struct si_ps *new_state = si_get_ps(new_ps);
-	struct si_ps *current_state = si_get_ps(old_ps);
-
-	if ((new_ps->vclk == old_ps->vclk) &&
-	    (new_ps->dclk == old_ps->dclk))
-		return;
-
-	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
-	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
-		return;
-
-	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
-}
-
-static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
-{
-	unsigned int i;
-
-	for (i = 0; i < table->count; i++)
-		if (voltage <= table->entries[i].value)
-			return table->entries[i].value;
-
-	return table->entries[table->count - 1].value;
-}
-
-static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
-				u32 max_clock, u32 requested_clock)
-{
-	unsigned int i;
-
-	if ((clocks == NULL) || (clocks->count == 0))
-		return (requested_clock < max_clock) ? requested_clock : max_clock;
-
-	for (i = 0; i < clocks->count; i++) {
-		if (clocks->values[i] >= requested_clock)
-			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
-	}
-
-	return (clocks->values[clocks->count - 1] < max_clock) ?
-		clocks->values[clocks->count - 1] : max_clock;
-}
-
-static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
-			      u32 max_mclk, u32 requested_mclk)
-{
-	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
-				    max_mclk, requested_mclk);
-}
-
-static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
-			      u32 max_sclk, u32 requested_sclk)
-{
-	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
-				    max_sclk, requested_sclk);
-}
-
-static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
-							    u32 *max_clock)
-{
-	u32 i, clock = 0;
-
-	if ((table == NULL) || (table->count == 0)) {
-		*max_clock = clock;
-		return;
-	}
-
-	for (i = 0; i < table->count; i++) {
-		if (clock < table->entries[i].clk)
-			clock = table->entries[i].clk;
-	}
-	*max_clock = clock;
-}
-
-static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
-					       u32 clock, u16 max_voltage, u16 *voltage)
-{
-	u32 i;
-
-	if ((table == NULL) || (table->count == 0))
-		return;
-
-	for (i = 0; i < table->count; i++) {
-		if (clock <= table->entries[i].clk) {
-			if (*voltage < table->entries[i].v)
-				*voltage = (u16)((table->entries[i].v < max_voltage) ?
-						 table->entries[i].v : max_voltage);
-			return;
-		}
-	}
-
-	*voltage = (*voltage > max_voltage) ?
*voltage : max_voltage; -} - -static void btc_adjust_clock_combinations(struct amdgpu_device *adev, - const struct amdgpu_clock_and_voltage_limits *max_limits, - struct rv7xx_pl *pl) -{ - - if ((pl->mclk == 0) || (pl->sclk == 0)) - return; - - if (pl->mclk == pl->sclk) - return; - - if (pl->mclk > pl->sclk) { - if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) - pl->sclk = btc_get_valid_sclk(adev, - max_limits->sclk, - (pl->mclk + - (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / - adev->pm.dpm.dyn_state.mclk_sclk_ratio); - } else { - if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) - pl->mclk = btc_get_valid_mclk(adev, - max_limits->mclk, - pl->sclk - - adev->pm.dpm.dyn_state.sclk_mclk_delta); - } -} - -static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, - u16 max_vddc, u16 max_vddci, - u16 *vddc, u16 *vddci) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - u16 new_voltage; - - if ((0 == *vddc) || (0 == *vddci)) - return; - - if (*vddc > *vddci) { - if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { - new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, - (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); - *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; - } - } else { - if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { - new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, - (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); - *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; - } - } -} - -static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, - u32 *p, u32 *u) -{ - u32 b_c = 0; - u32 i_c; - u32 tmp; - - i_c = (i * r_c) / 100; - tmp = i_c >> p_b; - - while (tmp) { - b_c++; - tmp >>= 1; - } - - *u = (b_c + 1) / 2; - *p = i_c / (1 << (2 * (*u))); -} - -static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) -{ - u32 k, a, ah, al; - u32 t1; - - if ((fl == 0) || (fh == 0) || (fl > fh)) - return -EINVAL; - - k = (100 * fh) / fl; - t1 = (t * (k - 100)); - a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); - a = (a + 5) / 10; - ah = ((a * t) + 5000) / 10000; - al = a - ah; - - *th = t - ah; - *tl = t + al; - - return 0; -} - -static bool r600_is_uvd_state(u32 class, u32 class2) -{ - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return true; - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return true; - return false; -} - -static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) -{ - return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); -} - -static void rv770_get_max_vddc(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - u16 vddc; - - if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) - pi->max_vddc = 0; - else - pi->max_vddc = vddc; -} - -static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct amdgpu_atom_ss ss; - - pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_ENGINE_SS, 0); - pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_MEMORY_SS, 0); - - if (pi->sclk_ss || pi->mclk_ss) - pi->dynamic_ss = true; - else - pi->dynamic_ss = false; -} - - -static void si_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - 
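/*
 * si_apply_state_adjust_rules() massages a requested power state until
 * every performance level is achievable: board-specific sclk/mclk caps
 * for certain Hainan and Oland SKUs, VCE clock floors, forcing memory
 * (and, while UVD's vclk/dclk are active, engine) clock switching off,
 * battery (DC) clock/voltage limits, clamps derived from the voltage
 * dependency tables, and finally the btc_* combination and delta rules
 * before the dc_compatible flag is recomputed. The recurring clamp
 * pattern throughout the body is simply:
 *
 *	for (i = 0; i < ps->performance_level_count; i++)
 *		if (max_sclk && ps->performance_levels[i].sclk > max_sclk)
 *			ps->performance_levels[i].sclk = max_sclk;
 *
 * applied per level for sclk, mclk, vddc and vddci against each limit.
 */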
struct si_ps *ps = si_get_ps(rps); - struct amdgpu_clock_and_voltage_limits *max_limits; - bool disable_mclk_switching = false; - bool disable_sclk_switching = false; - u32 mclk, sclk; - u16 vddc, vddci, min_vce_voltage = 0; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; - u32 max_sclk = 0, max_mclk = 0; - int i; - - if (adev->asic_type == CHIP_HAINAN) { - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6664) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)) { - max_sclk = 75000; - } - if ((adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6665)) { - max_sclk = 60000; - max_mclk = 80000; - } - } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) { - max_sclk = 75000; - } - } - - if (rps->vce_active) { - rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; - rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; - si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk, - &min_vce_voltage); - } else { - rps->evclk = 0; - rps->ecclk = 0; - } - - if ((adev->pm.dpm.new_active_crtc_count > 1) || - si_dpm_vblank_too_short(adev)) - disable_mclk_switching = true; - - if (rps->vclk || rps->dclk) { - disable_mclk_switching = true; - disable_sclk_switching = true; - } - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - for (i = ps->performance_level_count - 2; i >= 0; i--) { - if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc; - } - if (adev->pm.ac_power == false) { - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].mclk > max_limits->mclk) - ps->performance_levels[i].mclk = max_limits->mclk; - if (ps->performance_levels[i].sclk > max_limits->sclk) - ps->performance_levels[i].sclk = max_limits->sclk; - if (ps->performance_levels[i].vddc > max_limits->vddc) - ps->performance_levels[i].vddc = max_limits->vddc; - if (ps->performance_levels[i].vddci > max_limits->vddci) - ps->performance_levels[i].vddci = max_limits->vddci; - } - } - - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - for (i = 0; i < ps->performance_level_count; i++) { - if (max_sclk_vddc) { - if (ps->performance_levels[i].sclk > max_sclk_vddc) - ps->performance_levels[i].sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->performance_levels[i].mclk > max_mclk_vddci) - ps->performance_levels[i].mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->performance_levels[i].mclk > max_mclk_vddc) - ps->performance_levels[i].mclk = max_mclk_vddc; - } - if (max_mclk) { - if (ps->performance_levels[i].mclk > max_mclk) - ps->performance_levels[i].mclk = max_mclk; - } - if (max_sclk) { - if (ps->performance_levels[i].sclk > 
max_sclk) - ps->performance_levels[i].sclk = max_sclk; - } - } - - /* XXX validate the min clocks required for display */ - - if (disable_mclk_switching) { - mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; - } else { - mclk = ps->performance_levels[0].mclk; - vddci = ps->performance_levels[0].vddci; - } - - if (disable_sclk_switching) { - sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; - vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; - } else { - sclk = ps->performance_levels[0].sclk; - vddc = ps->performance_levels[0].vddc; - } - - if (rps->vce_active) { - if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) - sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; - if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) - mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; - } - - /* adjusted low state */ - ps->performance_levels[0].sclk = sclk; - ps->performance_levels[0].mclk = mclk; - ps->performance_levels[0].vddc = vddc; - ps->performance_levels[0].vddci = vddci; - - if (disable_sclk_switching) { - sclk = ps->performance_levels[0].sclk; - for (i = 1; i < ps->performance_level_count; i++) { - if (sclk < ps->performance_levels[i].sclk) - sclk = ps->performance_levels[i].sclk; - } - for (i = 0; i < ps->performance_level_count; i++) { - ps->performance_levels[i].sclk = sclk; - ps->performance_levels[i].vddc = vddc; - } - } else { - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) - ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; - if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; - } - } - - if (disable_mclk_switching) { - mclk = ps->performance_levels[0].mclk; - for (i = 1; i < ps->performance_level_count; i++) { - if (mclk < ps->performance_levels[i].mclk) - mclk = ps->performance_levels[i].mclk; - } - for (i = 0; i < ps->performance_level_count; i++) { - ps->performance_levels[i].mclk = mclk; - ps->performance_levels[i].vddci = vddci; - } - } else { - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk) - ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk; - if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci) - ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci; - } - } - - for (i = 0; i < ps->performance_level_count; i++) - btc_adjust_clock_combinations(adev, max_limits, - &ps->performance_levels[i]); - - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].vddc < min_vce_voltage) - ps->performance_levels[i].vddc = min_vce_voltage; - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - ps->performance_levels[i].sclk, - max_limits->vddc, &ps->performance_levels[i].vddc); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - ps->performance_levels[i].mclk, - max_limits->vddci, &ps->performance_levels[i].vddci); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - ps->performance_levels[i].mclk, - max_limits->vddc, &ps->performance_levels[i].vddc); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, - 
adev->clock.current_dispclk, - max_limits->vddc, &ps->performance_levels[i].vddc); - } - - for (i = 0; i < ps->performance_level_count; i++) { - btc_apply_voltage_delta_rules(adev, - max_limits->vddc, max_limits->vddci, - &ps->performance_levels[i].vddc, - &ps->performance_levels[i].vddci); - } - - ps->dc_compatible = true; - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) - ps->dc_compatible = false; - } -} - -#if 0 -static int si_read_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 *value) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - return amdgpu_si_read_smc_sram_dword(adev, - si_pi->soft_regs_start + reg_offset, value, - si_pi->sram_end); -} -#endif - -static int si_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - return amdgpu_si_write_smc_sram_dword(adev, - si_pi->soft_regs_start + reg_offset, - value, si_pi->sram_end); -} - -static bool si_is_special_1gb_platform(struct amdgpu_device *adev) -{ - bool ret = false; - u32 tmp, width, row, column, bank, density; - bool is_memory_gddr5, is_special; - - tmp = RREG32(MC_SEQ_MISC0); - is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT)); - is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) - & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT)); - - WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); - width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32; - - tmp = RREG32(MC_ARB_RAMCFG); - row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; - column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; - bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; - - density = (1 << (row + column - 20 + bank)) * width; - - if ((adev->pdev->device == 0x6819) && - is_memory_gddr5 && is_special && (density == 0x400)) - ret = true; - - return ret; -} - -static void si_get_leakage_vddc(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u16 vddc, count = 0; - int i, ret; - - for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) { - ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i); - - if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) { - si_pi->leakage_voltage.entries[count].voltage = vddc; - si_pi->leakage_voltage.entries[count].leakage_index = - SISLANDS_LEAKAGE_INDEX0 + i; - count++; - } - } - si_pi->leakage_voltage.count = count; -} - -static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev, - u32 index, u16 *leakage_voltage) -{ - struct si_power_info *si_pi = si_get_pi(adev); - int i; - - if (leakage_voltage == NULL) - return -EINVAL; - - if ((index & 0xff00) != 0xff00) - return -EINVAL; - - if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1) - return -EINVAL; - - if (index < SISLANDS_LEAKAGE_INDEX0) - return -EINVAL; - - for (i = 0; i < si_pi->leakage_voltage.count; i++) { - if (si_pi->leakage_voltage.entries[i].leakage_index == index) { - *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; - return 0; - } - } - return -EAGAIN; -} - -static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - bool want_thermal_protection; - enum amdgpu_dpm_event_src dpm_event_src; - - switch (sources) { - 
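/*
 * 'sources' is a bitmask of enum amdgpu_dpm_auto_throttle_src bits, so
 * the cases below decode the four possible combinations: no throttle
 * source (thermal protection disabled), thermal only, an external source
 * only, or both. For example, sources == (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)
 * selects AMDGPU_DPM_EVENT_SRC_DIGITAL, which is then programmed into the
 * DPM_EVENT_SRC field of CG_THERMAL_CTRL further down.
 */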
case 0: - default: - want_thermal_protection = false; - break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; - break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; - break; - case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | - (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; - break; - } - - if (want_thermal_protection) { - WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); - if (pi->thermal_protection) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); - } else { - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); - } -} - -static void si_enable_auto_throttle_source(struct amdgpu_device *adev, - enum amdgpu_dpm_auto_throttle_src source, - bool enable) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - if (enable) { - if (!(pi->active_auto_throttle_sources & (1 << source))) { - pi->active_auto_throttle_sources |= 1 << source; - si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } else { - if (pi->active_auto_throttle_sources & (1 << source)) { - pi->active_auto_throttle_sources &= ~(1 << source); - si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } -} - -static void si_start_dpm(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); -} - -static void si_stop_dpm(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); -} - -static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) -{ - if (enable) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); - else - WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); - -} - -#if 0 -static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev, - u32 thermal_level) -{ - PPSMC_Result ret; - - if (thermal_level == 0) { - ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); - if (ret == PPSMC_Result_OK) - return 0; - else - return -EINVAL; - } - return 0; -} - -static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev) -{ - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true); -} -#endif - -#if 0 -static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power) -{ - if (ac_power) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ? - 0 : -EINVAL; - - return 0; -} -#endif - -static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter) -{ - WREG32(SMC_SCRATCH0, parameter); - return amdgpu_si_send_msg_to_smc(adev, msg); -} - -static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) - return -EINVAL; - - return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ? 
- 0 : -EINVAL; -} - -static int si_dpm_force_performance_level(void *handle, - enum amd_dpm_forced_level level) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ps *rps = adev->pm.dpm.current_ps; - struct si_ps *ps = si_get_ps(rps); - u32 levels = ps->performance_level_count; - - if (level == AMD_DPM_FORCED_LEVEL_HIGH) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) - return -EINVAL; - } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) - return -EINVAL; - } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) - return -EINVAL; - } - - adev->pm.dpm.forced_level = level; - - return 0; -} - -#if 0 -static int si_set_boot_state(struct amdgpu_device *adev) -{ - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} -#endif - -static int si_set_sw_state(struct amdgpu_device *adev) -{ - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int si_halt_smc(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK) - return -EINVAL; - - return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int si_resume_smc(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK) - return -EINVAL; - - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ? 
- 0 : -EINVAL; -} - -static void si_dpm_start_smc(struct amdgpu_device *adev) -{ - amdgpu_si_program_jump_on_start(adev); - amdgpu_si_start_smc(adev); - amdgpu_si_smc_clock(adev, true); -} - -static void si_dpm_stop_smc(struct amdgpu_device *adev) -{ - amdgpu_si_reset_smc(adev); - amdgpu_si_smc_clock(adev, false); -} - -static int si_process_firmware_header(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_stateTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->state_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_softRegisters, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->soft_regs_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->mc_reg_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_fanTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->fan_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->arb_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->cac_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->dte_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_spllTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->spll_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->papm_cfg_table_start = tmp; - - return ret; -} - -static void si_read_clock_registers(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); - si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); - si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); - si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); - si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); - si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); - si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); - si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); - si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); - si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); - si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); - si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); - si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); - 
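/*
 * The two MPLL spread-spectrum words read next complete the boot-time
 * clock-register snapshot cached in si_pi->clock_registers;
 * si_populate_smc_initial_state() and si_populate_smc_acpi_state() later
 * copy this snapshot, byte-swapped with cpu_to_be32(), into the SMC state
 * table so the SMC can restore the boot clock setup for the initial and
 * ACPI levels.
 */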
si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); - si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); -} - -static void si_enable_thermal_protection(struct amdgpu_device *adev, - bool enable) -{ - if (enable) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); - else - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); -} - -static void si_enable_acpi_power_management(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); -} - -#if 0 -static int si_enter_ulp_state(struct amdgpu_device *adev) -{ - WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); - - udelay(25000); - - return 0; -} - -static int si_exit_ulp_state(struct amdgpu_device *adev) -{ - int i; - - WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); - - udelay(7000); - - for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(SMC_RESP_0) == 1) - break; - udelay(1000); - } - - return 0; -} -#endif - -static int si_notify_smc_display_change(struct amdgpu_device *adev, - bool has_display) -{ - PPSMC_Msg msg = has_display ? - PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; - - return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static void si_program_response_times(struct amdgpu_device *adev) -{ - u32 voltage_response_time, acpi_delay_time, vbi_time_out; - u32 vddc_dly, acpi_dly, vbi_dly; - u32 reference_clock; - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); - - voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; - - if (voltage_response_time == 0) - voltage_response_time = 1000; - - acpi_delay_time = 15000; - vbi_time_out = 100000; - - reference_clock = amdgpu_asic_get_xclk(adev); - - vddc_dly = (voltage_response_time * reference_clock) / 100; - acpi_dly = (acpi_delay_time * reference_clock) / 100; - vbi_dly = (vbi_time_out * reference_clock) / 100; - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA); -} - -static void si_program_ds_registers(struct amdgpu_device *adev) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - u32 tmp; - - /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */ - if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0) - tmp = 0x10; - else - tmp = 0x1; - - if (eg_pi->sclk_deep_sleep) { - WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); - WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, - ~AUTOSCALE_ON_SS_CLEAR); - } -} - -static void si_program_display_gap(struct amdgpu_device *adev) -{ - u32 tmp, pipe; - int i; - - tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - if (adev->pm.dpm.new_active_crtc_count > 0) - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); - else - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); - - if (adev->pm.dpm.new_active_crtc_count > 1) - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); - else - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); - - WREG32(CG_DISPLAY_GAP_CNTL, tmp); - - tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); - pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; - - if ((adev->pm.dpm.new_active_crtc_count > 0) && - (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { - /* find the first active crtc */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if 
(adev->pm.dpm.new_active_crtcs & (1 << i)) - break; - } - if (i == adev->mode_info.num_crtc) - pipe = 0; - else - pipe = i; - - tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; - tmp |= DCCG_DISP1_SLOW_SELECT(pipe); - WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); - } - - /* Setting this to false forces the performance state to low if the crtcs are disabled. - * This can be a problem on PowerXpress systems or if you want to use the card - * for offscreen rendering or compute if there are no crtcs enabled. - */ - si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); -} - -static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - if (enable) { - if (pi->sclk_ss) - WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); - } else { - WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); - WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); - } -} - -static void si_setup_bsp(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - u32 xclk = amdgpu_asic_get_xclk(adev); - - r600_calculate_u_and_p(pi->asi, - xclk, - 16, - &pi->bsp, - &pi->bsu); - - r600_calculate_u_and_p(pi->pasi, - xclk, - 16, - &pi->pbsp, - &pi->pbsu); - - - pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); - pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); - - WREG32(CG_BSP, pi->dsp); -} - -static void si_program_git(struct amdgpu_device *adev) -{ - WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); -} - -static void si_program_tp(struct amdgpu_device *adev) -{ - int i; - enum r600_td td = R600_TD_DFLT; - - for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) - WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); - - if (td == R600_TD_AUTO) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); - else - WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); - - if (td == R600_TD_UP) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); - - if (td == R600_TD_DOWN) - WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); -} - -static void si_program_tpp(struct amdgpu_device *adev) -{ - WREG32(CG_TPC, R600_TPC_DFLT); -} - -static void si_program_sstp(struct amdgpu_device *adev) -{ - WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); -} - -static void si_enable_display_gap(struct amdgpu_device *adev) -{ - u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); - - tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | - DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); - - tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); - tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | - DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); - WREG32(CG_DISPLAY_GAP_CNTL, tmp); -} - -static void si_program_vc(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - WREG32(CG_FTV, pi->vrc); -} - -static void si_clear_vc(struct amdgpu_device *adev) -{ - WREG32(CG_FTV, 0); -} - -static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) -{ - u8 mc_para_index; - - if (memory_clock < 10000) - mc_para_index = 0; - else if (memory_clock >= 80000) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); - return mc_para_index; -} - -static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) -{ - u8 mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) - mc_para_index = 0x00; - else if (memory_clock > 47500) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 2500); - } else { - if 
(memory_clock < 65000)
-			mc_para_index = 0x00;
-		else if (memory_clock > 135000)
-			mc_para_index = 0x0f;
-		else
-			mc_para_index = (u8)((memory_clock - 60000) / 5000);
-	}
-	return mc_para_index;
-}
-
-static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	bool strobe_mode = false;
-	u8 result = 0;
-
-	if (mclk <= pi->mclk_strobe_mode_threshold)
-		strobe_mode = true;
-
-	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
-		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
-	else
-		result = si_get_ddr3_mclk_frequency_ratio(mclk);
-
-	if (strobe_mode)
-		result |= SISLANDS_SMC_STROBE_ENABLE;
-
-	return result;
-}
-
-static int si_upload_firmware(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-
-	amdgpu_si_reset_smc(adev);
-	amdgpu_si_smc_clock(adev, false);
-
-	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
-}
-
-static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
-					      const struct atom_voltage_table *table,
-					      const struct amdgpu_phase_shedding_limits_table *limits)
-{
-	u32 data, num_bits, num_levels;
-
-	if ((table == NULL) || (limits == NULL))
-		return false;
-
-	data = table->mask_low;
-
-	num_bits = hweight32(data);
-
-	if (num_bits == 0)
-		return false;
-
-	num_levels = (1 << num_bits);
-
-	if (table->count != num_levels)
-		return false;
-
-	if (limits->count != (num_levels - 1))
-		return false;
-
-	return true;
-}
-
-static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
-						     u32 max_voltage_steps,
-						     struct atom_voltage_table *voltage_table)
-{
-	unsigned int i, diff;
-
-	if (voltage_table->count <= max_voltage_steps)
-		return;
-
-	diff = voltage_table->count - max_voltage_steps;
-
-	for (i = 0; i < max_voltage_steps; i++)
-		voltage_table->entries[i] = voltage_table->entries[i + diff];
-
-	voltage_table->count = max_voltage_steps;
-}
-
-static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
-				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
-				     struct atom_voltage_table *voltage_table)
-{
-	u32 i;
-
-	if (voltage_dependency_table == NULL)
-		return -EINVAL;
-
-	voltage_table->mask_low = 0;
-	voltage_table->phase_delay = 0;
-
-	voltage_table->count = voltage_dependency_table->count;
-	for (i = 0; i < voltage_table->count; i++) {
-		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
-		voltage_table->entries[i].smio_low = 0;
-	}
-
-	return 0;
-}
-
-static int si_construct_voltage_tables(struct amdgpu_device *adev)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	int ret;
-
-	if (pi->voltage_control) {
-		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
-							VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
-		if (ret)
-			return ret;
-
-		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
-			si_trim_voltage_table_to_fit_state_table(adev,
-								 SISLANDS_MAX_NO_VREG_STEPS,
-								 &eg_pi->vddc_voltage_table);
-	} else if (si_pi->voltage_control_svi2) {
-		ret = si_get_svi2_voltage_table(adev,
-						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
-						&eg_pi->vddc_voltage_table);
-		if (ret)
-			return ret;
-	} else {
-		return -EINVAL;
-	}
-
-	if (eg_pi->vddci_control) {
-		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
-							VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
-		if (ret)
-			return ret;
-
-		if (eg_pi->vddci_voltage_table.count
> SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(adev, - SISLANDS_MAX_NO_VREG_STEPS, - &eg_pi->vddci_voltage_table); - } - if (si_pi->vddci_control_svi2) { - ret = si_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &eg_pi->vddci_voltage_table); - if (ret) - return ret; - } - - if (pi->mvdd_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, - VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table); - - if (ret) { - pi->mvdd_control = false; - return ret; - } - - if (si_pi->mvdd_voltage_table.count == 0) { - pi->mvdd_control = false; - return -EINVAL; - } - - if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(adev, - SISLANDS_MAX_NO_VREG_STEPS, - &si_pi->mvdd_voltage_table); - } - - if (si_pi->vddc_phase_shed_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table); - if (ret) - si_pi->vddc_phase_shed_control = false; - - if ((si_pi->vddc_phase_shed_table.count == 0) || - (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS)) - si_pi->vddc_phase_shed_control = false; - } - - return 0; -} - -static void si_populate_smc_voltage_table(struct amdgpu_device *adev, - const struct atom_voltage_table *voltage_table, - SISLANDS_SMC_STATETABLE *table) -{ - unsigned int i; - - for (i = 0; i < voltage_table->count; i++) - table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); -} - -static int si_populate_smc_voltage_tables(struct amdgpu_device *adev, - SISLANDS_SMC_STATETABLE *table) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - u8 i; - - if (si_pi->voltage_control_svi2) { - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, - si_pi->svc_gpio_id); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, - si_pi->svd_gpio_id); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, - 2); - } else { - if (eg_pi->vddc_voltage_table.count) { - si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); - - for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { - if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { - table->maxVDDCIndexInPPTable = i; - break; - } - } - } - - if (eg_pi->vddci_voltage_table.count) { - si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); - - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); - } - - - if (si_pi->mvdd_voltage_table.count) { - si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); - - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = - cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); - } - - if (si_pi->vddc_phase_shed_control) { - if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) { - si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); - - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = - cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, - 
(u32)si_pi->vddc_phase_shed_table.phase_delay); - } else { - si_pi->vddc_phase_shed_control = false; - } - } - } - - return 0; -} - -static int si_populate_voltage_value(struct amdgpu_device *adev, - const struct atom_voltage_table *table, - u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - unsigned int i; - - for (i = 0; i < table->count; i++) { - if (value <= table->entries[i].value) { - voltage->index = (u8)i; - voltage->value = cpu_to_be16(table->entries[i].value); - break; - } - } - - if (i >= table->count) - return -EINVAL; - - return 0; -} - -static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, - SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (pi->mvdd_control) { - if (mclk <= pi->mvdd_split_frequency) - voltage->index = 0; - else - voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1; - - voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); - } - return 0; -} - -static int si_get_std_voltage_value(struct amdgpu_device *adev, - SISLANDS_SMC_VOLTAGE_VALUE *voltage, - u16 *std_voltage) -{ - u16 v_index; - bool voltage_found = false; - *std_voltage = be16_to_cpu(voltage->value); - - if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) { - if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) - return -EINVAL; - - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (be16_to_cpu(voltage->value) == - (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; - else - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; - break; - } - } - - if (!voltage_found) { - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (be16_to_cpu(voltage->value) <= - (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; - else - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; - break; - } - } - } - } else { - if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; - } - } - - return 0; -} - -static int si_populate_std_voltage_value(struct amdgpu_device *adev, - u16 value, u8 index, - SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - voltage->index = index; - voltage->value = cpu_to_be16(value); - - return 0; -} - -static int si_populate_phase_shedding_value(struct amdgpu_device *adev, - const struct amdgpu_phase_shedding_limits_table *limits, - u16 voltage, u32 sclk, u32 mclk, - SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage) -{ - unsigned int i; - - for (i = 0; i < limits->count; i++) { - if ((voltage <= limits->entries[i].voltage) && - (sclk <= limits->entries[i].sclk) && - (mclk <= limits->entries[i].mclk)) - break; - } - - smc_voltage->phase_settings = (u8)i; - - return 0; -} - -static int si_init_arb_table_index(struct amdgpu_device *adev) -{ 
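/*
 * The SI memory controller keeps four arbitration register sets
 * (MC_CG_ARB_FREQ_F0..F3); the high byte of the first dword at
 * arb_table_start records which set the SMC should treat as current.
 * The read-modify-write below preserves the low 24 bits and selects F1,
 * in effect:
 *
 *	tmp = (tmp & 0x00FFFFFF) | (MC_CG_ARB_FREQ_F1 << 24);
 *
 * si_force_switch_to_arb_f0() further down reads the same byte back to
 * decide whether a switch back to F0 is still needed.
 */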
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-	int ret;
-
-	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
-					    &tmp, si_pi->sram_end);
-	if (ret)
-		return ret;
-
-	tmp &= 0x00FFFFFF;
-	tmp |= MC_CG_ARB_FREQ_F1 << 24;
-
-	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
-					      tmp, si_pi->sram_end);
-}
-
-static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
-{
-	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int si_reset_to_default(struct amdgpu_device *adev)
-{
-	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
-		0 : -EINVAL;
-}
-
-static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-	int ret;
-
-	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
-					    &tmp, si_pi->sram_end);
-	if (ret)
-		return ret;
-
-	tmp = (tmp >> 24) & 0xff;
-
-	if (tmp == MC_CG_ARB_FREQ_F0)
-		return 0;
-
-	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
-					    u32 engine_clock)
-{
-	u32 dram_rows;
-	u32 dram_refresh_rate;
-	u32 mc_arb_rfsh_rate;
-	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
-
-	if (tmp >= 4)
-		dram_rows = 16384;
-	else
-		dram_rows = 1 << (tmp + 10);
-
-	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
-	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
-
-	return mc_arb_rfsh_rate;
-}
-
-static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
-						struct rv7xx_pl *pl,
-						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
-{
-	u32 dram_timing;
-	u32 dram_timing2;
-	u32 burst_time;
-
-	arb_regs->mc_arb_rfsh_rate =
-		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
-
-	amdgpu_atombios_set_engine_dram_timings(adev,
-						pl->sclk,
-						pl->mclk);
-
-	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
-	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
-	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
-
-	arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
-	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
-	arb_regs->mc_arb_burst_time = (u8)burst_time;
-
-	return 0;
-}
-
-static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
-						  struct amdgpu_ps *amdgpu_state,
-						  unsigned int first_arb_set)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ps *state = si_get_ps(amdgpu_state);
-	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
-	int i, ret = 0;
-
-	for (i = 0; i < state->performance_level_count; i++) {
-		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
-		if (ret)
-			break;
-		ret = amdgpu_si_copy_bytes_to_smc(adev,
-						  si_pi->arb_table_start +
-						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
-						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
-						  (u8 *)&arb_regs,
-						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
-						  si_pi->sram_end);
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
-					       struct amdgpu_ps *amdgpu_new_state)
-{
-	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
-						      SISLANDS_DRIVER_STATE_ARB_INDEX);
-}
-
-static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
-					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-
-	if (pi->mvdd_control)
-		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
-						 si_pi->mvdd_bootup_value, voltage);
-
-	return 0;
-}
-
-static int si_populate_smc_initial_state(struct amdgpu_device *adev,
-					 struct amdgpu_ps *amdgpu_initial_state,
-					 SISLANDS_SMC_STATETABLE *table)
-{
-	struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 reg;
-	int ret;
-
-	table->initialState.levels[0].mclk.vDLL_CNTL =
-		cpu_to_be32(si_pi->clock_registers.dll_cntl);
-	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
-		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
-	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
-		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
-	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
-		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
-	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
-		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
-	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
-		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
-	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
-		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
-	table->initialState.levels[0].mclk.vMPLL_SS =
-		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-	table->initialState.levels[0].mclk.vMPLL_SS2 =
-		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
-	table->initialState.levels[0].mclk.mclk_value =
-		cpu_to_be32(initial_state->performance_levels[0].mclk);
-
-	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
-	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
-	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
-	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
-	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
-	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
-		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
-
-	table->initialState.levels[0].sclk.sclk_value =
-		cpu_to_be32(initial_state->performance_levels[0].sclk);
-
-	table->initialState.levels[0].arbRefreshState =
-		SISLANDS_INITIAL_STATE_ARB_INDEX;
-
-	table->initialState.levels[0].ACIndex = 0;
-
-	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
-					initial_state->performance_levels[0].vddc,
-					&table->initialState.levels[0].vddc);
-
-	if (!ret) {
-		u16 std_vddc;
-
-		ret = si_get_std_voltage_value(adev,
-					       &table->initialState.levels[0].vddc,
-					       &std_vddc);
-		if (!ret)
-			si_populate_std_voltage_value(adev, std_vddc,
-						      table->initialState.levels[0].vddc.index,
-						      &table->initialState.levels[0].std_vddc);
-	}
-
-	if (eg_pi->vddci_control)
-		si_populate_voltage_value(adev,
-					  &eg_pi->vddci_voltage_table,
-					  initial_state->performance_levels[0].vddci,
-					  &table->initialState.levels[0].vddci);
-
-	if (si_pi->vddc_phase_shed_control)
-		si_populate_phase_shedding_value(adev,
-						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
-						 initial_state->performance_levels[0].vddc,
-						 initial_state->performance_levels[0].sclk,
-						 initial_state->performance_levels[0].mclk,
-						 &table->initialState.levels[0].vddc);
-
-	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
-
-	reg = CG_R(0xffff) | CG_L(0);
-	table->initialState.levels[0].aT = cpu_to_be32(reg);
-	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
-	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
-
-	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
-		table->initialState.levels[0].strobeMode =
-			si_get_strobe_mode_settings(adev,
-						    initial_state->performance_levels[0].mclk);
-
-		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
-			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
-		else
-			table->initialState.levels[0].mcFlags = 0;
-	}
-
-	table->initialState.levelCount = 1;
-
-	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
-
-	table->initialState.levels[0].dpm2.MaxPS = 0;
-	table->initialState.levels[0].dpm2.NearTDPDec = 0;
-	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
-	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
-	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
-	reg = MIN_POWER_MASK | MAX_POWER_MASK;
-	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
-	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
-	return 0;
-}
-
-static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
-				      SISLANDS_SMC_STATETABLE *table)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
-	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
-	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
-	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
-	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
-	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
-	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
-	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
-	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
-	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
-	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
-	u32 reg;
-	int ret;
-
-	table->ACPIState = table->initialState;
-
-	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-	if (pi->acpi_vddc) {
-		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
-						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
-		if (!ret) {
-			u16 std_vddc;
-
-			ret = si_get_std_voltage_value(adev,
-						       &table->ACPIState.levels[0].vddc, &std_vddc);
-			if (!ret)
-				si_populate_std_voltage_value(adev, std_vddc,
-							      table->ACPIState.levels[0].vddc.index,
-							      &table->ACPIState.levels[0].std_vddc);
-		}
-		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
-
-		if (si_pi->vddc_phase_shed_control) {
-			si_populate_phase_shedding_value(adev,
-							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
-							 pi->acpi_vddc,
-							 0,
-							 0,
-							 &table->ACPIState.levels[0].vddc);
-		}
-	} else {
-		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
-						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
-		if (!ret) {
-			u16 std_vddc;
-
-			ret = si_get_std_voltage_value(adev,
-						       &table->ACPIState.levels[0].vddc, &std_vddc);
-
-			if (!ret)
-				si_populate_std_voltage_value(adev, std_vddc,
-							      table->ACPIState.levels[0].vddc.index,
-							      &table->ACPIState.levels[0].std_vddc);
-		}
-		table->ACPIState.levels[0].gen2PCIE =
-			(u8)amdgpu_get_pcie_gen_support(adev,
-							si_pi->sys_pcie_mask,
-							si_pi->boot_pcie_gen,
-							AMDGPU_PCIE_GEN1);
-
-		if (si_pi->vddc_phase_shed_control)
-			si_populate_phase_shedding_value(adev,
-							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
-							 pi->min_vddc_in_table,
-							 0,
-							 0,
-							 &table->ACPIState.levels[0].vddc);
-	}
-
-	if (pi->acpi_vddc) {
-		if (eg_pi->acpi_vddci)
-			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
-						  eg_pi->acpi_vddci,
-						  &table->ACPIState.levels[0].vddci);
-	}
-
-	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
-	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
-	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
-
-	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
-	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
-
-	table->ACPIState.levels[0].mclk.vDLL_CNTL =
-		cpu_to_be32(dll_cntl);
-	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
-		cpu_to_be32(mclk_pwrmgt_cntl);
-	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
-		cpu_to_be32(mpll_ad_func_cntl);
-	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
-		cpu_to_be32(mpll_dq_func_cntl);
-	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
-		cpu_to_be32(mpll_func_cntl);
-	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
-		cpu_to_be32(mpll_func_cntl_1);
-	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
-		cpu_to_be32(mpll_func_cntl_2);
-	table->ACPIState.levels[0].mclk.vMPLL_SS =
-		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
-		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
-
-	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
-		cpu_to_be32(spll_func_cntl);
-	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
-		cpu_to_be32(spll_func_cntl_2);
-	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
-		cpu_to_be32(spll_func_cntl_3);
-	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
-		cpu_to_be32(spll_func_cntl_4);
-
-	table->ACPIState.levels[0].mclk.mclk_value = 0;
-	table->ACPIState.levels[0].sclk.sclk_value = 0;
-
-	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
-
-	if (eg_pi->dynamic_ac_timing)
-		table->ACPIState.levels[0].ACIndex = 0;
-
-	table->ACPIState.levels[0].dpm2.MaxPS = 0;
-	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
-	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
-	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
-	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
-
-	reg = MIN_POWER_MASK | MAX_POWER_MASK;
-	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
-
-	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
-
-	return 0;
-}
-
-static int si_populate_ulv_state(struct amdgpu_device *adev,
-				 SISLANDS_SMC_SWSTATE *state)
-{
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-	u32 sclk_in_sr = 1350; /* ??? */
-	int ret;
-
-	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
-					    &state->levels[0]);
-	if (!ret) {
-		if (eg_pi->sclk_deep_sleep) {
-			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
-				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
-			else
-				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
-		}
-		if (ulv->one_pcie_lane_in_ulv)
-			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
-		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
-		state->levels[0].ACIndex = 1;
-		state->levels[0].std_vddc = state->levels[0].vddc;
-		state->levelCount = 1;
-
-		state->flags |= PPSMC_SWSTATE_FLAG_DC;
-	}
-
-	return ret;
-}
-
-static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
-	int ret;
-
-	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
-						   &arb_regs);
-	if (ret)
-		return ret;
-
-	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
-				   ulv->volt_change_delay);
-
-	ret = amdgpu_si_copy_bytes_to_smc(adev,
-					  si_pi->arb_table_start +
-					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
-					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
-					  (u8 *)&arb_regs,
-					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
-					  si_pi->sram_end);
-
-	return ret;
-}
-
-static void si_get_mvdd_configuration(struct amdgpu_device *adev)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-
-	pi->mvdd_split_frequency = 30000;
-}
-
-static int si_init_smc_table(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
-	const struct si_ulv_param *ulv = &si_pi->ulv;
-	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
-	int ret;
-	u32 lane_width;
-	u32 vr_hot_gpio;
-
-	si_populate_smc_voltage_tables(adev, table);
-
-	switch (adev->pm.int_thermal_type) {
-	case THERMAL_TYPE_SI:
-	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
-		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
-		break;
-	case THERMAL_TYPE_NONE:
-		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
-		break;
-	default:
-		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
-		break;
-	}
-
-	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
-		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
-		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
-			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
-	}
-
-	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
-		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
-	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
-		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
-	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
-		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
-
-	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
-		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
-		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
-		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
-					   vr_hot_gpio);
-	}
-
-	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
-	if (ret)
-		return ret;
-
-	ret = si_populate_smc_acpi_state(adev, table);
-	if (ret)
-		return ret;
-
-	table->driverState = table->initialState;
-
-	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
-						     SISLANDS_INITIAL_STATE_ARB_INDEX);
-	if (ret)
-		return ret;
-
-	if (ulv->supported && ulv->pl.vddc) {
-		ret = si_populate_ulv_state(adev, &table->ULVState);
-		if (ret)
-			return ret;
-
-		ret = si_program_ulv_memory_timing_parameters(adev);
-		if (ret)
-			return ret;
-
-		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
-		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
-
-		lane_width = amdgpu_get_pcie_lanes(adev);
-		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
-	} else {
-		table->ULVState = table->initialState;
-	}
-
-	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
-					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
-					   si_pi->sram_end);
-}
-
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
-				    u32 engine_clock,
-				    SISLANDS_SMC_SCLK_VALUE *sclk)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct atom_clock_dividers dividers;
-	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
-	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
-	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
-	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
-	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
-	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
-	u64 tmp;
-	u32 reference_clock = adev->clock.spll.reference_freq;
-	u32 reference_divider;
-	u32 fbdiv;
-	int ret;
-
-	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
-						 engine_clock, false, &dividers);
-	if (ret)
-		return ret;
-
-	reference_divider = 1 + dividers.ref_div;
-
-	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
-	do_div(tmp, reference_clock);
-	fbdiv = (u32) tmp;
-
-	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
-	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
-	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
-
-	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
-	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
-
-	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
-	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
-	spll_func_cntl_3 |= SPLL_DITHEN;
-
-	if (pi->sclk_ss) {
-		struct amdgpu_atom_ss ss;
-		u32 vco_freq = engine_clock * dividers.post_div;
-
-		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
-						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
-			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
-			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
-
-			cg_spll_spread_spectrum &= ~CLK_S_MASK;
-			cg_spll_spread_spectrum |= CLK_S(clk_s);
-			cg_spll_spread_spectrum |= SSEN;
-
-			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
-			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
-		}
-	}
-
-	sclk->sclk_value = engine_clock;
-	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
-	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
-	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
-	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
-	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
-	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
-
-	return 0;
-}
-
-static int si_populate_sclk_value(struct amdgpu_device *adev,
-				  u32 engine_clock,
-				  SISLANDS_SMC_SCLK_VALUE *sclk)
-{
-	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
-	int ret;
-
-	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
-	if (!ret) {
-		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
-		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
-		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
-		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
-		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
-		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
-		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
-	}
-
-	return ret;
-}
-
-static int si_populate_mclk_value(struct amdgpu_device *adev,
-				  u32 engine_clock,
-				  u32 memory_clock,
-				  SISLANDS_SMC_MCLK_VALUE *mclk,
-				  bool strobe_mode,
-				  bool dll_state_on)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
-	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
-	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
-	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
-	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
-	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
-	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
-	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
-	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
-	struct atom_mpll_param mpll_param;
-	int ret;
-
-	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
-	if (ret)
-		return ret;
-
-	mpll_func_cntl &= ~BWCTRL_MASK;
-	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
-
-	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
-	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
-		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
-
-	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
-	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
-
-	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
-		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
-		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
-			YCLK_POST_DIV(mpll_param.post_div);
-	}
-
-	if (pi->mclk_ss) {
-		struct amdgpu_atom_ss ss;
-		u32 freq_nom;
-		u32 tmp;
-		u32 reference_clock = adev->clock.mpll.reference_freq;
-
-		if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
-			freq_nom = memory_clock * 4;
-		else
-			freq_nom = memory_clock * 2;
-
-		tmp = freq_nom / reference_clock;
-		tmp = tmp * tmp;
-		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
-						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
-			u32 clks = reference_clock * 5 / ss.rate;
-			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
-
-			mpll_ss1 &= ~CLKV_MASK;
-			mpll_ss1 |= CLKV(clkv);
-
-			mpll_ss2 &= ~CLKS_MASK;
-			mpll_ss2 |= CLKS(clks);
-		}
-	}
-
-	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
-	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
-
-	if (dll_state_on)
-		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
-	else
-		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
-
-	mclk->mclk_value = cpu_to_be32(memory_clock);
-	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
-	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
-	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
-	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
-	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
-	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
-	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
-	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
-	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
-
-	return 0;
-}
-
-static void si_populate_smc_sp(struct amdgpu_device *adev,
-			       struct amdgpu_ps *amdgpu_state,
-			       SISLANDS_SMC_SWSTATE *smc_state)
-{
-	struct si_ps *ps = si_get_ps(amdgpu_state);
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	int i;
-
-	for (i = 0; i < ps->performance_level_count - 1; i++)
-		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
-
-	smc_state->levels[ps->performance_level_count - 1].bSP =
-		cpu_to_be32(pi->psp);
-}
-
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
-					 struct rv7xx_pl *pl,
-					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	int ret;
-	bool dll_state_on;
-	u16 std_vddc;
-	bool gmc_pg = false;
-
-	if (eg_pi->pcie_performance_request &&
-	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
-		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
-	else
-		level->gen2PCIE = (u8)pl->pcie_gen;
-
-	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
-	if (ret)
-		return ret;
-
-	level->mcFlags = 0;
-
-	if (pi->mclk_stutter_mode_threshold &&
-	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
-	    !eg_pi->uvd_enabled &&
-	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
-	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
-		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
-
-		if (gmc_pg)
-			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
-	}
-
-	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
-		if (pl->mclk > pi->mclk_edc_enable_threshold)
-			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
-
-		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
-			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
-
-		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
-
-		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
-			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
-			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
-				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
-			else
-				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
-		} else {
-			dll_state_on = false;
-		}
-	} else {
-		level->strobeMode = si_get_strobe_mode_settings(adev,
-								pl->mclk);
-
-		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
-	}
-
-	ret = si_populate_mclk_value(adev,
-				     pl->sclk,
-				     pl->mclk,
-				     &level->mclk,
-				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
-	if (ret)
-		return ret;
-
-	ret = si_populate_voltage_value(adev,
-					&eg_pi->vddc_voltage_table,
-					pl->vddc, &level->vddc);
-	if (ret)
-		return ret;
-
-
-	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
-	if (ret)
-		return ret;
-
-	ret = si_populate_std_voltage_value(adev, std_vddc,
-					    level->vddc.index, &level->std_vddc);
-	if (ret)
-		return ret;
-
-	if (eg_pi->vddci_control) {
-		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
-						pl->vddci, &level->vddci);
-		if (ret)
-			return ret;
-	}
-
-	if (si_pi->vddc_phase_shed_control) {
-		ret = si_populate_phase_shedding_value(adev,
-						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
-						       pl->vddc,
-						       pl->sclk,
-						       pl->mclk,
-						       &level->vddc);
-		if (ret)
-			return ret;
-	}
-
-	level->MaxPoweredUpCU = si_pi->max_cu;
-
-	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
-
-	return ret;
-}
-
-static int si_populate_smc_t(struct amdgpu_device *adev,
-			     struct amdgpu_ps *amdgpu_state,
-			     SISLANDS_SMC_SWSTATE *smc_state)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct si_ps *state = si_get_ps(amdgpu_state);
-	u32 a_t;
-	u32 t_l, t_h;
-	u32 high_bsp;
-	int i, ret;
-
-	if (state->performance_level_count >= 9)
-		return -EINVAL;
-
-	if (state->performance_level_count < 2) {
-		a_t = CG_R(0xffff) | CG_L(0);
-		smc_state->levels[0].aT = cpu_to_be32(a_t);
-		return 0;
-	}
-
-	smc_state->levels[0].aT = cpu_to_be32(0);
-
-	for (i = 0; i <= state->performance_level_count - 2; i++) {
-		ret = r600_calculate_at(
-			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
-			100 * R600_AH_DFLT,
-			state->performance_levels[i + 1].sclk,
-			state->performance_levels[i].sclk,
-			&t_l,
-			&t_h);
-
-		if (ret) {
-			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
-			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
-		}
-
-		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
-		a_t |= CG_R(t_l * pi->bsp / 20000);
-		smc_state->levels[i].aT = cpu_to_be32(a_t);
-
-		high_bsp = (i == state->performance_level_count - 2) ?
-			pi->pbsp : pi->bsp;
-		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
-		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
-	}
-
-	return 0;
-}
-
-static int si_disable_ulv(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-
-	if (ulv->supported)
-		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
-			0 : -EINVAL;
-
-	return 0;
-}
-
-static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
-				       struct amdgpu_ps *amdgpu_state)
-{
-	const struct si_power_info *si_pi = si_get_pi(adev);
-	const struct si_ulv_param *ulv = &si_pi->ulv;
-	const struct si_ps *state = si_get_ps(amdgpu_state);
-	int i;
-
-	if (state->performance_levels[0].mclk != ulv->pl.mclk)
-		return false;
-
-	/* XXX validate against display requirements! */
-
-	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
-		if (adev->clock.current_dispclk <=
-		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
-			if (ulv->pl.vddc <
-			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
-				return false;
-		}
-	}
-
-	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
-		return false;
-
-	return true;
-}
-
-static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
-						       struct amdgpu_ps *amdgpu_new_state)
-{
-	const struct si_power_info *si_pi = si_get_pi(adev);
-	const struct si_ulv_param *ulv = &si_pi->ulv;
-
-	if (ulv->supported) {
-		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
-			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
-				0 : -EINVAL;
-	}
-	return 0;
-}
-
-static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
-					 struct amdgpu_ps *amdgpu_state,
-					 SISLANDS_SMC_SWSTATE *smc_state)
-{
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct ni_power_info *ni_pi = ni_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ps *state = si_get_ps(amdgpu_state);
-	int i, ret;
-	u32 threshold;
-	u32 sclk_in_sr = 1350; /* ??? */
-
-	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
-		return -EINVAL;
-
-	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
-
-	if (amdgpu_state->vclk && amdgpu_state->dclk) {
-		eg_pi->uvd_enabled = true;
-		if (eg_pi->smu_uvd_hs)
-			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
-	} else {
-		eg_pi->uvd_enabled = false;
-	}
-
-	if (state->dc_compatible)
-		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
-
-	smc_state->levelCount = 0;
-	for (i = 0; i < state->performance_level_count; i++) {
-		if (eg_pi->sclk_deep_sleep) {
-			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
-				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
-					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
-				else
-					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
-			}
-		}
-
-		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
-						    &smc_state->levels[i]);
-		smc_state->levels[i].arbRefreshState =
-			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
-
-		if (ret)
-			return ret;
-
-		if (ni_pi->enable_power_containment)
-			smc_state->levels[i].displayWatermark =
-				(state->performance_levels[i].sclk < threshold) ?
-				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
-		else
-			smc_state->levels[i].displayWatermark = (i < 2) ?
-				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
-
-		if (eg_pi->dynamic_ac_timing)
-			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
-		else
-			smc_state->levels[i].ACIndex = 0;
-
-		smc_state->levelCount++;
-	}
-
-	si_write_smc_soft_register(adev,
-				   SI_SMC_SOFT_REGISTER_watermark_threshold,
-				   threshold / 512);
-
-	si_populate_smc_sp(adev, amdgpu_state, smc_state);
-
-	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
-	if (ret)
-		ni_pi->enable_power_containment = false;
-
-	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
-	if (ret)
-		ni_pi->enable_sq_ramping = false;
-
-	return si_populate_smc_t(adev, amdgpu_state, smc_state);
-}
-
-static int si_upload_sw_state(struct amdgpu_device *adev,
-			      struct amdgpu_ps *amdgpu_new_state)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
-	int ret;
-	u32 address = si_pi->state_table_start +
-		offsetof(SISLANDS_SMC_STATETABLE, driverState);
-	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
-		((new_state->performance_level_count - 1) *
-		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
-	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
-
-	memset(smc_state, 0, state_size);
-
-	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
-	if (ret)
-		return ret;
-
-	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
-					   state_size, si_pi->sram_end);
-}
-
-static int si_upload_ulv_state(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-	int ret = 0;
-
-	if (ulv->supported && ulv->pl.vddc) {
-		u32 address = si_pi->state_table_start +
-			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
-		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
-		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
-
-		memset(smc_state, 0, state_size);
-
-		ret = si_populate_ulv_state(adev, smc_state);
-		if (!ret)
-			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
-							  state_size, si_pi->sram_end);
-	}
-
-	return ret;
-}
-
-static int si_upload_smc_data(struct amdgpu_device *adev)
-{
-	struct amdgpu_crtc *amdgpu_crtc = NULL;
-	int i;
-
-	if (adev->pm.dpm.new_active_crtc_count == 0)
-		return 0;
-
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
-			amdgpu_crtc = adev->mode_info.crtcs[i];
-			break;
-		}
-	}
-
-	if (amdgpu_crtc == NULL)
-		return 0;
-
-	if (amdgpu_crtc->line_time <= 0)
-		return 0;
-
-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_crtc_index,
-				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
-		return 0;
-
-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
-				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
-		return 0;
-
-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
-				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
-		return 0;
-
-	return 0;
-}
-
-static int si_set_mc_special_registers(struct amdgpu_device *adev,
-				       struct si_mc_reg_table *table)
-{
-	u8 i, j, k;
-	u32 temp_reg;
-
-	for (i = 0, j = table->last; i < table->last; i++) {
-		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-			return -EINVAL;
-		switch (table->mc_reg_address[i].s1) {
-		case MC_SEQ_MISC1:
-			temp_reg = RREG32(MC_PMG_CMD_EMRS);
-			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
-			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
-			for (k = 0; k < table->num_entries; k++)
-				table->mc_reg_table_entry[k].mc_data[j] =
-					((temp_reg & 0xffff0000)) |
-					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
-			j++;
-
-			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-				return -EINVAL;
-			temp_reg = RREG32(MC_PMG_CMD_MRS);
-			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
-			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
-			for (k = 0; k < table->num_entries; k++) {
-				table->mc_reg_table_entry[k].mc_data[j] =
-					(temp_reg & 0xffff0000) |
-					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
-					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
-			}
-			j++;
-
-			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
-				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-					return -EINVAL;
-				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
-				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
-				for (k = 0; k < table->num_entries; k++)
-					table->mc_reg_table_entry[k].mc_data[j] =
-						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
-				j++;
-			}
-			break;
-		case MC_SEQ_RESERVE_M:
-			temp_reg = RREG32(MC_PMG_CMD_MRS1);
-			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
-			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
-			for(k = 0; k < table->num_entries; k++)
-				table->mc_reg_table_entry[k].mc_data[j] =
-					(temp_reg & 0xffff0000) |
-					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-			j++;
-			break;
-		default:
-			break;
-		}
-	}
-
-	table->last = j;
-
-	return 0;
-}
-
-static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
-{
-	bool result = true;
-	switch (in_reg) {
-	case MC_SEQ_RAS_TIMING:
-		*out_reg = MC_SEQ_RAS_TIMING_LP;
-		break;
-	case MC_SEQ_CAS_TIMING:
-		*out_reg = MC_SEQ_CAS_TIMING_LP;
-		break;
-	case MC_SEQ_MISC_TIMING:
-		*out_reg = MC_SEQ_MISC_TIMING_LP;
-		break;
-	case MC_SEQ_MISC_TIMING2:
-		*out_reg = MC_SEQ_MISC_TIMING2_LP;
-		break;
-	case MC_SEQ_RD_CTL_D0:
-		*out_reg = MC_SEQ_RD_CTL_D0_LP;
-		break;
-	case MC_SEQ_RD_CTL_D1:
-		*out_reg = MC_SEQ_RD_CTL_D1_LP;
-		break;
-	case MC_SEQ_WR_CTL_D0:
-		*out_reg = MC_SEQ_WR_CTL_D0_LP;
-		break;
-	case MC_SEQ_WR_CTL_D1:
-		*out_reg = MC_SEQ_WR_CTL_D1_LP;
-		break;
-	case MC_PMG_CMD_EMRS:
-		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
-		break;
-	case MC_PMG_CMD_MRS:
-		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
-		break;
-	case MC_PMG_CMD_MRS1:
-		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
-		break;
-	case MC_SEQ_PMG_TIMING:
-		*out_reg = MC_SEQ_PMG_TIMING_LP;
-		break;
-	case MC_PMG_CMD_MRS2:
-		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
-		break;
-	case MC_SEQ_WR_CTL_2:
-		*out_reg = MC_SEQ_WR_CTL_2_LP;
-		break;
-	default:
-		result = false;
-		break;
-	}
-
-	return result;
-}
-
-static void si_set_valid_flag(struct si_mc_reg_table *table)
-{
-	u8 i, j;
-
-	for (i = 0; i < table->last; i++) {
-		for (j = 1; j < table->num_entries; j++) {
-			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
-				table->valid_flag |= 1 << i;
-				break;
-			}
-		}
-	}
-}
-
-static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
-{
-	u32 i;
-	u16 address;
-
-	for (i = 0; i < table->last; i++)
-		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
-			address : table->mc_reg_address[i].s1;
-
-}
-
-static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
-				      struct si_mc_reg_table *si_table)
-{
-	u8 i, j;
-
-	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-		return -EINVAL;
-	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
-		return -EINVAL;
-
-	for (i = 0; i < table->last; i++)
-		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-	si_table->last = table->last;
-
-	for (i = 0; i < table->num_entries; i++) {
-		si_table->mc_reg_table_entry[i].mclk_max =
-			table->mc_reg_table_entry[i].mclk_max;
-		for (j = 0; j < table->last; j++) {
-			si_table->mc_reg_table_entry[i].mc_data[j] =
-				table->mc_reg_table_entry[i].mc_data[j];
-		}
-	}
-	si_table->num_entries = table->num_entries;
-
-	return 0;
-}
-
-static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct atom_mc_reg_table *table;
-	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
-	u8 module_index = rv770_get_memory_module_index(adev);
-	int ret;
-
-	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
-	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
-	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
-	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
-	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
-	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
-	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
-	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
-	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
-	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
-	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
-	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
-	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
-	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
-	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
-
-	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
-	if (ret)
-		goto init_mc_done;
-
-	ret = si_copy_vbios_mc_reg_table(table, si_table);
-	if (ret)
-		goto init_mc_done;
-
-	si_set_s0_mc_reg_index(si_table);
-
-	ret = si_set_mc_special_registers(adev, si_table);
-	if (ret)
-		goto init_mc_done;
-
-	si_set_valid_flag(si_table);
-
-init_mc_done:
-	kfree(table);
-
-	return ret;
-
-}
-
-static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
-					 SMC_SIslands_MCRegisters *mc_reg_table)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 i, j;
-
-	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
-		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
-			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
-				break;
-			mc_reg_table->address[i].s0 =
-				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
-			mc_reg_table->address[i].s1 =
-				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
-			i++;
-		}
-	}
-	mc_reg_table->last = (u8)i;
-}
-
-static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
-				    SMC_SIslands_MCRegisterSet *data,
-				    u32 num_entries, u32 valid_flag)
-{
-	u32 i, j;
-
-	for(i = 0, j = 0; j < num_entries; j++) {
-		if (valid_flag & (1 << j)) {
-			data->value[i] = cpu_to_be32(entry->mc_data[j]);
-			i++;
-		}
-	}
-}
-
-static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
-						 struct rv7xx_pl *pl,
-						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 i = 0;
-
-	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
-		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
-			break;
-	}
-
-	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
-		--i;
-
-	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
-				mc_reg_table_data, si_pi->mc_reg_table.last,
-				si_pi->mc_reg_table.valid_flag);
-}
-
-static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
-					   struct amdgpu_ps *amdgpu_state,
-					   SMC_SIslands_MCRegisters *mc_reg_table)
-{
-	struct si_ps *state = si_get_ps(amdgpu_state);
-	int i;
-
-	for (i = 0; i < state->performance_level_count; i++) {
-		si_convert_mc_reg_table_entry_to_smc(adev,
-						     &state->performance_levels[i],
-						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
-	}
-}
-
-static int si_populate_mc_reg_table(struct amdgpu_device *adev,
-				    struct amdgpu_ps *amdgpu_boot_state)
-{
-	struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
-	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
-	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
-	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
-
-	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
-
-	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
-					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
-
-	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
-				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
-				si_pi->mc_reg_table.last,
-				si_pi->mc_reg_table.valid_flag);
-
-	if (ulv->supported && ulv->pl.vddc != 0)
-		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
-						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
-	else
-		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
-					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
-					si_pi->mc_reg_table.last,
-					si_pi->mc_reg_table.valid_flag);
-
-	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
-
-	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
-					   (u8 *)smc_mc_reg_table,
-					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
-}
-
-static int si_upload_mc_reg_table(struct amdgpu_device *adev,
-				  struct amdgpu_ps *amdgpu_new_state)
-{
-	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 address = si_pi->mc_reg_table_start +
-		offsetof(SMC_SIslands_MCRegisters,
-			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
-	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
-
-	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
-
-	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
-
-	return amdgpu_si_copy_bytes_to_smc(adev, address,
-					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
-					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
-					   si_pi->sram_end);
-}
-
-static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
-{
-	if (enable)
-		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
-	else
-		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
-}
-
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
-						      struct amdgpu_ps *amdgpu_state)
-{
-	struct si_ps *state = si_get_ps(amdgpu_state);
-	int i;
-	u16 pcie_speed, max_speed = 0;
-
-	for (i = 0; i < state->performance_level_count; i++) {
-		pcie_speed = state->performance_levels[i].pcie_gen;
-		if (max_speed < pcie_speed)
-			max_speed = pcie_speed;
-	}
-	return max_speed;
-}
-
-static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
-{
-	u32 speed_cntl;
-
-	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
-	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
-
-	return (u16)speed_cntl;
-}
-
-static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
-							     struct amdgpu_ps *amdgpu_new_state,
-							     struct amdgpu_ps *amdgpu_current_state)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
-	enum amdgpu_pcie_gen current_link_speed;
-
-	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
-		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
-	else
-		current_link_speed = si_pi->force_pcie_gen;
-
-	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
-	si_pi->pspp_notify_required = false;
-	if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
-#if defined(CONFIG_ACPI)
-		case AMDGPU_PCIE_GEN3:
-			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
-				break;
-			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
-			if (current_link_speed == AMDGPU_PCIE_GEN2)
-				break;
-			/* fall through */
-		case AMDGPU_PCIE_GEN2:
-			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
-				break;
-#endif
-			/* fall through */
-		default:
-			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
-			break;
-		}
-	} else {
-		if (target_link_speed < current_link_speed)
-			si_pi->pspp_notify_required = true;
-	}
-}
-
-static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
-							   struct amdgpu_ps *amdgpu_new_state,
-							   struct amdgpu_ps *amdgpu_current_state)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
-	u8 request;
-
-	if (si_pi->pspp_notify_required) {
-		if (target_link_speed == AMDGPU_PCIE_GEN3)
-			request = PCIE_PERF_REQ_PECI_GEN3;
-		else if (target_link_speed == AMDGPU_PCIE_GEN2)
-			request = PCIE_PERF_REQ_PECI_GEN2;
-		else
-			request = PCIE_PERF_REQ_PECI_GEN1;
-
-		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
-		    (si_get_current_pcie_speed(adev) > 0))
-			return;
-
-#if defined(CONFIG_ACPI)
-		amdgpu_acpi_pcie_performance_request(adev, request, false);
-#endif
-	}
-}
-
-#if 0
-static int si_ds_request(struct amdgpu_device *adev,
-			 bool ds_status_on, u32 count_write)
-{
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-
-	if (eg_pi->sclk_deep_sleep) {
-		if (ds_status_on)
-			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
-				PPSMC_Result_OK) ?
-				0 : -EINVAL;
-		else
-			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
-				PPSMC_Result_OK) ? 0 : -EINVAL;
-	}
-	return 0;
-}
-#endif
-
-static void si_set_max_cu_value(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-
-	if (adev->asic_type == CHIP_VERDE) {
-		switch (adev->pdev->device) {
-		case 0x6820:
-		case 0x6825:
-		case 0x6821:
-		case 0x6823:
-		case 0x6827:
-			si_pi->max_cu = 10;
-			break;
-		case 0x682D:
-		case 0x6824:
-		case 0x682F:
-		case 0x6826:
-			si_pi->max_cu = 8;
-			break;
-		case 0x6828:
-		case 0x6830:
-		case 0x6831:
-		case 0x6838:
-		case 0x6839:
-		case 0x683D:
-			si_pi->max_cu = 10;
-			break;
-		case 0x683B:
-		case 0x683F:
-		case 0x6829:
-			si_pi->max_cu = 8;
-			break;
-		default:
-			si_pi->max_cu = 0;
-			break;
-		}
-	} else {
-		si_pi->max_cu = 0;
-	}
-}
-
-static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
-							     struct amdgpu_clock_voltage_dependency_table *table)
-{
-	u32 i;
-	int j;
-	u16 leakage_voltage;
-
-	if (table) {
-		for (i = 0; i < table->count; i++) {
-			switch (si_get_leakage_voltage_from_leakage_index(adev,
-									  table->entries[i].v,
-									  &leakage_voltage)) {
-			case 0:
-				table->entries[i].v = leakage_voltage;
-				break;
-			case -EAGAIN:
-				return -EINVAL;
-			case -EINVAL:
-			default:
-				break;
-			}
-		}
-
-		for (j = (table->count - 2); j >= 0; j--) {
-			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
-				table->entries[j].v : table->entries[j + 1].v;
-		}
-	}
-	return 0;
-}
-
-static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
-{
-	int ret = 0;
-
-	ret = si_patch_single_dependency_table_based_on_leakage(adev,
-								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
-	if (ret)
-		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
-	ret = si_patch_single_dependency_table_based_on_leakage(adev,
-								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
-	if (ret)
-		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
-	ret = si_patch_single_dependency_table_based_on_leakage(adev,
-								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
-	if (ret)
-		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
-	return ret;
-}
-
-static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
-					  struct amdgpu_ps *amdgpu_new_state,
-					  struct amdgpu_ps *amdgpu_current_state)
-{
-	u32 lane_width;
-	u32 new_lane_width =
-		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-	u32 current_lane_width =
-		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-
-	if (new_lane_width != current_lane_width) {
-		amdgpu_set_pcie_lanes(adev, new_lane_width);
-		lane_width = amdgpu_get_pcie_lanes(adev);
-		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
-	}
-}
-
-static void si_dpm_setup_asic(struct amdgpu_device *adev)
-{
-	si_read_clock_registers(adev);
-	si_enable_acpi_power_management(adev);
-}
-
-static int si_thermal_enable_alert(struct amdgpu_device *adev,
-				   bool enable)
-{
-	u32 thermal_int = RREG32(CG_THERMAL_INT);
-
-	if (enable) {
-		PPSMC_Result result;
-
-		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-		WREG32(CG_THERMAL_INT, thermal_int);
-		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
-		if (result != PPSMC_Result_OK) {
-			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-			return -EINVAL;
-		}
-	} else {
-		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
-		WREG32(CG_THERMAL_INT, thermal_int);
-	}
-
-	return 0;
-}
-
-static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
-					    int min_temp, int max_temp)
-{
-	int low_temp = 0 * 1000;
-	int high_temp = 255 * 1000;
-
-	if (low_temp < min_temp)
-		low_temp = min_temp;
-	if (high_temp > max_temp)
-		high_temp = max_temp;
-	if (high_temp < low_temp) {
-		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
-		return -EINVAL;
-	}
-
-	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
-	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
-	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
-
-	adev->pm.dpm.thermal.min_temp = low_temp;
-	adev->pm.dpm.thermal.max_temp = high_temp;
-
-	return 0;
-}
-
-static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-
-	if (si_pi->fan_ctrl_is_in_default_mode) {
-		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
-		si_pi->fan_ctrl_default_mode = tmp;
-		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
-		si_pi->t_min = tmp;
-		si_pi->fan_ctrl_is_in_default_mode = false;
-	}
-
-	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
-	tmp |= TMIN(0);
-	WREG32(CG_FDO_CTRL2, tmp);
-
-	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
-	tmp |= FDO_PWM_MODE(mode);
-	WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
-	u32 duty100;
-	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
-	u16 fdo_min, slope1, slope2;
-	u32 reference_clock, tmp;
-	int ret;
-	u64 tmp64;
-
-	if (!si_pi->fan_table_start) {
-		adev->pm.dpm.fan.ucode_fan_control = false;
-		return 0;
-	}
-
-	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
-	if (duty100 == 0) {
-		adev->pm.dpm.fan.ucode_fan_control = false;
-		return 0;
-	}
-
-	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
-	do_div(tmp64, 10000);
-	fdo_min = (u16)tmp64;
-
-	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
-	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
-
-	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
-	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
-
-	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
-	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
-	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
-	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
-	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
-	fan_table.slope1 = cpu_to_be16(slope1);
-	fan_table.slope2 = cpu_to_be16(slope2);
-	fan_table.fdo_min = cpu_to_be16(fdo_min);
-	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
-	fan_table.hys_up = cpu_to_be16(1);
-	fan_table.hys_slope = cpu_to_be16(1);
-	fan_table.temp_resp_lim = cpu_to_be16(5);
-	reference_clock = amdgpu_asic_get_xclk(adev);
-
-	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
-						reference_clock) / 1600);
-	fan_table.fdo_max = cpu_to_be16((u16)duty100);
-
-	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
-	fan_table.temp_src = (uint8_t)tmp;
-
-	ret = amdgpu_si_copy_bytes_to_smc(adev,
-					  si_pi->fan_table_start,
-					  (u8 *)(&fan_table),
-					  sizeof(fan_table),
-					  si_pi->sram_end);
-
-	if (ret) {
-		DRM_ERROR("Failed to load fan table to the SMC.");
-		adev->pm.dpm.fan.ucode_fan_control = false;
-	}
-
-	return ret;
-}
-
-static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	PPSMC_Result ret;
-
-	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
-	if (ret == PPSMC_Result_OK) {
-		si_pi->fan_is_controlled_by_smc = true;
-		return 0;
-	} else {
-		return -EINVAL;
-	}
-}
-
-static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	PPSMC_Result ret;
-
-	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
-
-	if (ret == PPSMC_Result_OK) {
-		si_pi->fan_is_controlled_by_smc = false;
-		return 0;
-	} else {
-		return -EINVAL;
-	}
-}
-
-static int si_dpm_get_fan_speed_percent(void *handle,
-					u32 *speed)
-{
-	u32 duty, duty100;
-	u64 tmp64;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (adev->pm.no_fan)
-		return -ENOENT;
-
-	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
-
-	if (duty100 == 0)
-		return -EINVAL;
-
-	tmp64 = (u64)duty * 100;
-	do_div(tmp64, duty100);
-	*speed = (u32)tmp64;
-
-	if (*speed > 100)
-		*speed = 100;
-
-	return 0;
-}
-
-static int si_dpm_set_fan_speed_percent(void *handle,
-					u32 speed)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-	u32 duty, duty100;
-	u64 tmp64;
-
-	if (adev->pm.no_fan)
-		return -ENOENT;
-
-	if (si_pi->fan_is_controlled_by_smc)
-		return -EINVAL;
-
-	if (speed > 100)
-		return -EINVAL;
-
-	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
-
-	if (duty100 == 0)
-		return -EINVAL;
-
-	tmp64 = (u64)speed * duty100;
-	do_div(tmp64, 100);
-	duty = (u32)tmp64;
-
-	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
-	tmp |= FDO_STATIC_DUTY(duty);
-	WREG32(CG_FDO_CTRL0, tmp);
-
-	return 0;
-}
-
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (mode) {
-		/* stop auto-manage */
-		if (adev->pm.dpm.fan.ucode_fan_control)
-			si_fan_ctrl_stop_smc_fan_control(adev);
-		si_fan_ctrl_set_static_mode(adev, mode);
-	} else {
-		/* restart auto-manage */
-		if (adev->pm.dpm.fan.ucode_fan_control)
-			si_thermal_start_smc_fan_control(adev);
-		else
-			si_fan_ctrl_set_default_mode(adev);
-	}
-}
-
-static u32 si_dpm_get_fan_control_mode(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-
-	if (si_pi->fan_is_controlled_by_smc)
-		return 0;
-
-	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
-	return (tmp >> FDO_PWM_MODE_SHIFT);
-}
-
-#if 0
-static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
-					 u32 *speed)
-{
-	u32 tach_period;
-	u32 xclk = amdgpu_asic_get_xclk(adev);
-
-	if (adev->pm.no_fan)
-		return -ENOENT;
-
-	if (adev->pm.fan_pulses_per_revolution == 0)
-		return -ENOENT;
-
-	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
-	if (tach_period == 0)
-		return -ENOENT;
-
-	*speed = 60 * xclk * 10000 / tach_period;
-
-	return 0;
-}
-
-static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
-					 u32 speed)
-{
-	u32 tach_period, tmp;
-	u32 xclk = amdgpu_asic_get_xclk(adev);
-
-	if (adev->pm.no_fan)
-		return -ENOENT;
-
-	if (adev->pm.fan_pulses_per_revolution == 0)
-		return -ENOENT;
-
-	if ((speed < adev->pm.fan_min_rpm) ||
-	    (speed > adev->pm.fan_max_rpm))
-		return -EINVAL;
-
-	if (adev->pm.dpm.fan.ucode_fan_control)
-		si_fan_ctrl_stop_smc_fan_control(adev);
-
-	tach_period = 60 * xclk * 10000 / (8 * speed);
-	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
-	tmp |= TARGET_PERIOD(tach_period);
-	WREG32(CG_TACH_CTRL, tmp);
-
-	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
-
-	return 0;
-}
-#endif
-
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
-{
-	struct si_power_info *si_pi = si_get_pi(adev);
-	u32 tmp;
-
-	if (!si_pi->fan_ctrl_is_in_default_mode) {
-		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
-		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
-		WREG32(CG_FDO_CTRL2, tmp);
-
-		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
-		tmp |= TMIN(si_pi->t_min);
-		WREG32(CG_FDO_CTRL2, tmp);
-		si_pi->fan_ctrl_is_in_default_mode = true;
-	}
-}
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
-{
-	if (adev->pm.dpm.fan.ucode_fan_control) {
-		si_fan_ctrl_start_smc_fan_control(adev);
-		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
-	}
-}
-
-static void si_thermal_initialize(struct amdgpu_device *adev)
-{
-	u32 tmp;
-
-	if (adev->pm.fan_pulses_per_revolution) {
-		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
-		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
-		WREG32(CG_TACH_CTRL, tmp);
-	}
-
-	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
-	tmp |= TACH_PWM_RESP_RATE(0x28);
-	WREG32(CG_FDO_CTRL2, tmp);
-}
-
-static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
-{
-	int ret;
-
-	si_thermal_initialize(adev);
-	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-	if (ret)
-		return ret;
-	ret = si_thermal_enable_alert(adev, true);
-	if (ret)
-		return ret;
-	if (adev->pm.dpm.fan.ucode_fan_control) {
-		ret = si_halt_smc(adev);
-		if (ret)
-			return ret;
-		ret = si_thermal_setup_fan_table(adev);
-		if (ret)
-			return ret;
-		ret = si_resume_smc(adev);
-		if (ret)
-			return ret;
-		si_thermal_start_smc_fan_control(adev);
-	}
-
-	return 0;
-}
-
-static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
-{
-	if (!adev->pm.no_fan) {
-		si_fan_ctrl_set_default_mode(adev);
-		si_fan_ctrl_stop_smc_fan_control(adev);
-	}
-}
-
-static int si_dpm_enable(struct amdgpu_device *adev)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-	int ret;
-
-	if (amdgpu_si_is_smc_running(adev))
-		return -EINVAL;
-	if (pi->voltage_control || si_pi->voltage_control_svi2)
-		si_enable_voltage_control(adev, true);
-	if (pi->mvdd_control)
-		si_get_mvdd_configuration(adev);
-	if (pi->voltage_control || si_pi->voltage_control_svi2) {
-		ret = si_construct_voltage_tables(adev);
-		if (ret) {
-			DRM_ERROR("si_construct_voltage_tables failed\n");
-			return ret;
-		}
-	}
-	if (eg_pi->dynamic_ac_timing) {
-		ret = si_initialize_mc_reg_table(adev);
-		if (ret)
-			eg_pi->dynamic_ac_timing = false;
-	}
-	if (pi->dynamic_ss)
-		si_enable_spread_spectrum(adev, true);
-	if (pi->thermal_protection)
-		si_enable_thermal_protection(adev, true);
-	si_setup_bsp(adev);
-	si_program_git(adev);
-	si_program_tp(adev);
-	si_program_tpp(adev);
-	si_program_sstp(adev);
-	si_enable_display_gap(adev);
-	si_program_vc(adev);
-	ret = si_upload_firmware(adev);
-	if (ret) {
-		DRM_ERROR("si_upload_firmware failed\n");
-		return ret;
-	}
-	ret = si_process_firmware_header(adev);
-	if (ret) {
-		DRM_ERROR("si_process_firmware_header failed\n");
-		return ret;
-	}
-	ret = si_initial_switch_from_arb_f0_to_f1(adev);
-	if (ret) {
-		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
-		return ret;
-	}
-	ret = si_init_smc_table(adev);
-	if (ret) {
-		DRM_ERROR("si_init_smc_table failed\n");
-		return ret;
-	}
-	ret = si_init_smc_spll_table(adev);
-	if (ret) {
-		DRM_ERROR("si_init_smc_spll_table failed\n");
-		return ret;
-	}
-	ret = si_init_arb_table_index(adev);
-	if (ret) {
-		DRM_ERROR("si_init_arb_table_index failed\n");
-		return ret;
-	}
-	if (eg_pi->dynamic_ac_timing) {
-		ret = si_populate_mc_reg_table(adev, boot_ps);
-		if (ret) {
-			DRM_ERROR("si_populate_mc_reg_table failed\n");
-			return ret;
-		}
-	}
-	ret = si_initialize_smc_cac_tables(adev);
-	if (ret) {
-		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
-		return ret;
-	}
-	ret = si_initialize_hardware_cac_manager(adev);
-	if (ret) {
-		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
-		return ret;
-	}
-	ret = si_initialize_smc_dte_tables(adev);
-	if (ret) {
-		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
-		return ret;
-	}
-	ret = si_populate_smc_tdp_limits(adev, boot_ps);
-	if (ret) {
-		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
-		return ret;
-	}
-	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
-	if (ret) {
-		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
-		return ret;
-	}
-	si_program_response_times(adev);
-	si_program_ds_registers(adev);
-	si_dpm_start_smc(adev);
-	ret = si_notify_smc_display_change(adev, false);
-	if (ret) {
-		DRM_ERROR("si_notify_smc_display_change failed\n");
-		return ret;
-	}
-	si_enable_sclk_control(adev, true);
-	si_start_dpm(adev);
-
-	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-	si_thermal_start_thermal_controller(adev);
-
-	return 0;
-}
-
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
-	int ret;
-
-	ret = si_thermal_enable_alert(adev, false);
-	if (ret)
-		return ret;
-	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-	if (ret)
-		return ret;
-	ret = si_thermal_enable_alert(adev, true);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-static void si_dpm_disable(struct amdgpu_device *adev)
-{
-	struct rv7xx_power_info *pi = rv770_get_pi(adev);
-	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-
-	if (!amdgpu_si_is_smc_running(adev))
-		return;
-	si_thermal_stop_thermal_controller(adev);
-	si_disable_ulv(adev);
-	si_clear_vc(adev);
-	if (pi->thermal_protection)
-		si_enable_thermal_protection(adev, false);
-	si_enable_power_containment(adev, boot_ps, false);
-	si_enable_smc_cac(adev, boot_ps, false);
-	si_enable_spread_spectrum(adev, false);
-	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
-	si_stop_dpm(adev);
-	si_reset_to_default(adev);
-	si_dpm_stop_smc(adev);
-	si_force_switch_to_arb_f0(adev);
-
-	ni_update_current_ps(adev, boot_ps);
-}
-
-static int si_dpm_pre_set_power_state(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
-	struct amdgpu_ps *new_ps = &requested_ps;
-
-	ni_update_requested_ps(adev, new_ps);
-	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
-
-	return 0;
-}
-
-static int si_power_control_set_level(struct amdgpu_device *adev)
-{
-	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
-	int ret;
-
-	ret = si_restrict_performance_levels_before_switch(adev);
-	if (ret)
-		return ret;
-	ret = si_halt_smc(adev);
-	if (ret)
-		return ret;
-	ret = si_populate_smc_tdp_limits(adev, new_ps);
-	if (ret)
-		return ret;
-	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
-	if (ret)
-		return ret;
-	ret = si_resume_smc(adev);
-	if (ret)
-		return ret;
-	ret = si_set_sw_state(adev);
-	if (ret)
-		return ret;
-	return 0;
-}
-
-static int si_dpm_set_power_state(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
-	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
-	int ret;
-
-	ret = si_disable_ulv(adev);
-	if (ret) {
-		DRM_ERROR("si_disable_ulv failed\n");
-		return ret;
-	}
-	ret = si_restrict_performance_levels_before_switch(adev);
-	if (ret) {
-		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
-		return ret;
-	}
-	if (eg_pi->pcie_performance_request)
-		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
-	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
-	ret = si_enable_power_containment(adev, new_ps, false);
-	if (ret) {
-		DRM_ERROR("si_enable_power_containment failed\n");
-		return ret;
-	}
-	ret = si_enable_smc_cac(adev, new_ps, false);
-	if (ret) {
-		DRM_ERROR("si_enable_smc_cac failed\n");
-		return ret;
-	}
-	ret = si_halt_smc(adev);
-	if (ret) {
-		DRM_ERROR("si_halt_smc failed\n");
-		return ret;
-	}
-	ret = si_upload_sw_state(adev, new_ps);
-	if (ret) {
-		DRM_ERROR("si_upload_sw_state failed\n");
-		return ret;
-	}
-	ret = si_upload_smc_data(adev);
-	if (ret) {
-		DRM_ERROR("si_upload_smc_data failed\n");
-		return ret;
-	}
-	ret = si_upload_ulv_state(adev);
-	if (ret) {
-		DRM_ERROR("si_upload_ulv_state failed\n");
-		return ret;
-	}
-	if (eg_pi->dynamic_ac_timing) {
-		ret = si_upload_mc_reg_table(adev, new_ps);
-		if (ret) {
-			DRM_ERROR("si_upload_mc_reg_table failed\n");
-			return ret;
-		}
-	}
-	ret = si_program_memory_timing_parameters(adev, new_ps);
-	if (ret) {
-		DRM_ERROR("si_program_memory_timing_parameters failed\n");
-		return ret;
-	}
-	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
-
-	ret = si_resume_smc(adev);
-	if (ret) {
-		DRM_ERROR("si_resume_smc failed\n");
-		return ret;
-	}
-	ret = si_set_sw_state(adev);
-	if (ret) {
-		DRM_ERROR("si_set_sw_state failed\n");
-		return ret;
-	}
-	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
-	if (eg_pi->pcie_performance_request)
-		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
-	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
-	if (ret) {
-		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
-		return ret;
-	}
-	ret = si_enable_smc_cac(adev, new_ps, true);
-	if (ret) {
-		DRM_ERROR("si_enable_smc_cac failed\n");
-		return ret;
-	}
-	ret = si_enable_power_containment(adev, new_ps, true);
-	if (ret) {
-		DRM_ERROR("si_enable_power_containment failed\n");
-		return ret;
-	}
-
-	ret = si_power_control_set_level(adev);
-	if (ret) {
-		DRM_ERROR("si_power_control_set_level failed\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static void si_dpm_post_set_power_state(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
-	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
-
-	ni_update_current_ps(adev, new_ps);
-}
-
-#if 0
-void si_dpm_reset_asic(struct amdgpu_device *adev)
-{
-	si_restrict_performance_levels_before_switch(adev);
-	si_disable_ulv(adev);
-	si_set_boot_state(adev);
-}
-#endif
-static void
si_dpm_display_configuration_changed(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_program_display_gap(adev); -} - - -static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else if (r600_is_uvd_state(rps->class, rps->class2)) { - rps->vclk = RV770_DEFAULT_VCLK_FREQ; - rps->dclk = RV770_DEFAULT_DCLK_FREQ; - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) - adev->pm.dpm.boot_ps = rps; - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; -} - -static void si_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ps *ps = si_get_ps(rps); - u16 leakage_voltage; - struct rv7xx_pl *pl = &ps->performance_levels[index]; - int ret; - - ps->performance_level_count = index + 1; - - pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); - pl->sclk |= clock_info->si.ucEngineClockHigh << 16; - pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); - pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; - - pl->vddc = le16_to_cpu(clock_info->si.usVDDC); - pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); - pl->flags = le32_to_cpu(clock_info->si.ulFlags); - pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - clock_info->si.ucPCIEGen); - - /* patch up vddc if necessary */ - ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, - &leakage_voltage); - if (ret == 0) - pl->vddc = leakage_voltage; - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { - pi->acpi_vddc = pl->vddc; - eg_pi->acpi_vddci = pl->vddci; - si_pi->acpi_pcie_gen = pl->pcie_gen; - } - - if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && - index == 0) { - /* XXX disable for A0 tahiti */ - si_pi->ulv.supported = false; - si_pi->ulv.pl = *pl; - si_pi->ulv.one_pcie_lane_in_ulv = false; - si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; - si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; - si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; - } - - if (pi->min_vddc_in_table > pl->vddc) - pi->min_vddc_in_table = pl->vddc; - - if (pi->max_vddc_in_table < pl->vddc) - pi->max_vddc_in_table = pl->vddc; - - /* patch up boot state */ - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - u16 vddc, vddci, mvdd; - amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); - pl->mclk = adev->clock.default_mclk; - pl->sclk = adev->clock.default_sclk; - pl->vddc = vddc; - pl->vddci = vddci; - si_pi->mvdd_bootup_value = mvdd; - } - - if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; - 
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; - } -} - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static int si_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct si_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - amdgpu_add_thermal_controller(adev); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, - sizeof(struct amdgpu_ps), - GFP_KERNEL); - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); - return -ENOMEM; - } - adev->pm.dpm.ps[i].ps_priv = ps; - si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - k = 0; - idx = (u8 *)&power_state->v2.clockInfoIndex[0]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = idx[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - ((u8 *)&clock_info_array->clockInfo[0] + - (clock_array_index * clock_info_array->ucEntrySize)); - si_parse_pplib_clock_info(adev, - &adev->pm.dpm.ps[i], k, - clock_info); - k++; - } - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - u32 sclk, mclk; - clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; - sclk = le16_to_cpu(clock_info->si.usEngineClockLow); - sclk |= clock_info->si.ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); - mclk |= clock_info->si.ucMemoryClockHigh << 16; - adev->pm.dpm.vce_states[i].sclk = sclk; - adev->pm.dpm.vce_states[i].mclk = mclk; - } - - return 0; -} - -static int si_dpm_init(struct amdgpu_device *adev) -{ - struct 
rv7xx_power_info *pi; - struct evergreen_power_info *eg_pi; - struct ni_power_info *ni_pi; - struct si_power_info *si_pi; - struct atom_clock_dividers dividers; - int ret; - - si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); - if (si_pi == NULL) - return -ENOMEM; - adev->pm.dpm.priv = si_pi; - ni_pi = &si_pi->ni; - eg_pi = &ni_pi->eg; - pi = &eg_pi->rv7xx; - - si_pi->sys_pcie_mask = - adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK; - si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; - si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); - - si_set_max_cu_value(adev); - - rv770_get_max_vddc(adev); - si_get_leakage_vddc(adev); - si_patch_dependency_tables_based_on_leakage(adev); - - pi->acpi_vddc = 0; - eg_pi->acpi_vddci = 0; - pi->min_vddc_in_table = 0; - pi->max_vddc_in_table = 0; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - return ret; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - return ret; - - ret = si_parse_power_table(adev); - if (ret) - return ret; - - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = - kcalloc(4, - sizeof(struct amdgpu_clock_voltage_dependency_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; - - if (adev->pm.dpm.voltage_response_time == 0) - adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; - if (adev->pm.dpm.backbias_response_time == 0) - adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - 0, false, ÷rs); - if (ret) - pi->ref_div = dividers.ref_div + 1; - else - pi->ref_div = R600_REFERENCEDIVIDER_DFLT; - - eg_pi->smu_uvd_hs = false; - - pi->mclk_strobe_mode_threshold = 40000; - if (si_is_special_1gb_platform(adev)) - pi->mclk_stutter_mode_threshold = 0; - else - pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold; - pi->mclk_edc_enable_threshold = 40000; - eg_pi->mclk_edc_wr_enable_threshold = 40000; - - ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; - - pi->voltage_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_GPIO_LUT); - if (!pi->voltage_control) { - si_pi->voltage_control_svi2 = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_SVID2); - if (si_pi->voltage_control_svi2) - amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); - } - - pi->mvdd_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC, - VOLTAGE_OBJ_GPIO_LUT); - - eg_pi->vddci_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, - VOLTAGE_OBJ_GPIO_LUT); - if (!eg_pi->vddci_control) - si_pi->vddci_control_svi2 = - amdgpu_atombios_is_voltage_gpio(adev, 
SET_VOLTAGE_TYPE_ASIC_VDDCI, - VOLTAGE_OBJ_SVID2); - - si_pi->vddc_phase_shed_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_PHASE_LUT); - - rv770_get_engine_memory_ss(adev); - - pi->asi = RV770_ASI_DFLT; - pi->pasi = CYPRESS_HASI_DFLT; - pi->vrc = SISLANDS_VRC_DFLT; - - pi->gfx_clock_gating = true; - - eg_pi->sclk_deep_sleep = true; - si_pi->sclk_deep_sleep_above_low = false; - - if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) - pi->thermal_protection = true; - else - pi->thermal_protection = false; - - eg_pi->dynamic_ac_timing = true; - - eg_pi->light_sleep = true; -#if defined(CONFIG_ACPI) - eg_pi->pcie_performance_request = - amdgpu_acpi_is_pcie_performance_request_supported(adev); -#else - eg_pi->pcie_performance_request = false; -#endif - - si_pi->sram_end = SMC_RAM_END; - - adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; - adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; - adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; - adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; - adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; - - si_initialize_powertune_defaults(adev); - - /* make sure dc limits are valid */ - if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || - (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - - si_pi->fan_ctrl_is_in_default_mode = true; - - return 0; -} - -static void si_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - if (adev->pm.dpm.ps) - for (i = 0; i < adev->pm.dpm.num_ps; i++) - kfree(adev->pm.dpm.ps[i].ps_priv); - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); - amdgpu_free_extended_power_table(adev); -} - -static void si_dpm_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *rps = &eg_pi->current_rps; - struct si_ps *ps = si_get_ps(rps); - struct rv7xx_pl *pl; - u32 current_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; - - if (current_index >= ps->performance_level_count) { - seq_printf(m, "invalid dpm profile %d\n", current_index); - } else { - pl = &ps->performance_levels[current_index]; - seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", - current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); - } -} - -static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 cg_thermal_int; - - switch (type) { - case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = 
RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - default: - break; - } - return 0; -} - -static int si_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - bool queue_thermal = false; - - if (entry == NULL) - return -EINVAL; - - switch (entry->src_id) { - case 230: /* thermal low to high */ - DRM_DEBUG("IH: thermal low to high\n"); - adev->pm.dpm.thermal.high_to_low = false; - queue_thermal = true; - break; - case 231: /* thermal high to low */ - DRM_DEBUG("IH: thermal high to low\n"); - adev->pm.dpm.thermal.high_to_low = true; - queue_thermal = true; - break; - default: - break; - } - - if (queue_thermal) - schedule_work(&adev->pm.dpm.thermal.work); - - return 0; -} - -static int si_dpm_late_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? - si_dpm_powergate_uvd(adev, true); -#endif - return 0; -} - -/** - * si_dpm_init_microcode - load ucode images from disk - * - * @adev: amdgpu_device pointer - * - * Use the firmware interface to load the ucode images into - * the driver (not loaded into hw). - * Returns 0 on success, error on failure. - */ -static int si_dpm_init_microcode(struct amdgpu_device *adev) -{ - const char *chip_name; - char fw_name[30]; - int err; - - DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_TAHITI: - chip_name = "tahiti"; - break; - case CHIP_PITCAIRN: - if ((adev->pdev->revision == 0x81) && - ((adev->pdev->device == 0x6810) || - (adev->pdev->device == 0x6811))) - chip_name = "pitcairn_k"; - else - chip_name = "pitcairn"; - break; - case CHIP_VERDE: - if (((adev->pdev->device == 0x6820) && - ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83))) || - ((adev->pdev->device == 0x6821) && - ((adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87))) || - ((adev->pdev->revision == 0x87) && - ((adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682b)))) - chip_name = "verde_k"; - else - chip_name = "verde"; - break; - case CHIP_OLAND: - if (((adev->pdev->revision == 0x81) && - ((adev->pdev->device == 0x6600) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605) || - (adev->pdev->device == 0x6610))) || - ((adev->pdev->revision == 0x83) && - (adev->pdev->device == 0x6610))) - chip_name = "oland_k"; - else - chip_name = "oland"; - break; - case CHIP_HAINAN: - if (((adev->pdev->revision == 0x81) && - (adev->pdev->device == 0x6660)) || - ((adev->pdev->revision == 0x83) && - ((adev->pdev->device == 0x6660) || - (adev->pdev->device == 0x6663) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)))) - chip_name = "hainan_k"; - else if ((adev->pdev->revision == 0xc3) && - (adev->pdev->device == 0x6665)) - chip_name = "banks_k_2"; - else - chip_name = "hainan"; - break; - default: BUG(); - } - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("si_smc: Failed to load firmware. 
err = %d\"%s\"\n", - err, fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; - -} - -static int si_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - ret = si_dpm_init_microcode(adev); - if (ret) - return ret; - - INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); - ret = si_dpm_init(adev); - if (ret) - goto dpm_failed; - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_failed: - si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - return ret; -} - -static int si_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - flush_work(&adev->pm.dpm.thermal.work); - - mutex_lock(&adev->pm.mutex); - si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int si_dpm_hw_init(void *handle) -{ - int ret; - - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!amdgpu_dpm) - return 0; - - mutex_lock(&adev->pm.mutex); - si_dpm_setup_asic(adev); - ret = si_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); - return ret; -} - -static int si_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - si_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } - - return 0; -} - -static int si_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - /* disable dpm */ - si_dpm_disable(adev); - /* reset the power state */ - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); - } - return 0; -} - -static int si_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); - si_dpm_setup_asic(adev); - ret = si_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); - } - return 0; -} - -static bool si_dpm_is_idle(void *handle) -{ - /* XXX */ - return true; -} - -static int si_dpm_wait_for_idle(void *handle) -{ - /* XXX */ - return 0; -} - -static int si_dpm_soft_reset(void *handle) -{ - return 0; -} - 
-static int si_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int si_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -/* get temperature in millidegrees */ -static int si_dpm_get_temp(void *handle) -{ - u32 temp; - int actual_temp = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> - CTF_TEMP_SHIFT; - - if (temp & 0x200) - actual_temp = 255; - else - actual_temp = temp & 0x1ff; - - actual_temp = (actual_temp * 1000); - - return actual_temp; -} - -static u32 si_dpm_get_sclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].sclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; -} - -static u32 si_dpm_get_mclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].mclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; -} - -static void si_dpm_print_power_state(void *handle, - void *current_ps) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; - struct si_ps *ps = si_get_ps(rps); - struct rv7xx_pl *pl; - int i; - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->performance_level_count; i++) { - pl = &ps->performance_levels[i]; - if (adev->asic_type >= CHIP_TAHITI) - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); - else - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); - } - amdgpu_dpm_print_ps_status(adev, rps); -} - -static int si_dpm_early_init(void *handle) -{ - - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->powerplay.pp_funcs = &si_dpm_funcs; - adev->powerplay.pp_handle = adev; - si_dpm_set_irq_funcs(adev); - return 0; -} - -static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, - const struct rv7xx_pl *si_cpl2) -{ - return ((si_cpl1->mclk == si_cpl2->mclk) && - (si_cpl1->sclk == si_cpl2->sclk) && - (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && - (si_cpl1->vddc == si_cpl2->vddc) && - (si_cpl1->vddci == si_cpl2->vddci)); -} - -static int si_check_state_equal(void *handle, - void *current_ps, - void *request_ps, - bool *equal) -{ - struct si_ps *si_cps; - struct si_ps *si_rps; - int i; - struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) - return -EINVAL; - - si_cps = si_get_ps((struct amdgpu_ps *)cps); - si_rps = si_get_ps((struct amdgpu_ps *)rps); - - if (si_cps == NULL) { - printk("si_cps is NULL\n"); - *equal = false; - return 0; - } - - if 
(si_cps->performance_level_count != si_rps->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < si_cps->performance_level_count; i++) { - if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), - &(si_rps->performance_levels[i]))) { - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); - *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); - - return 0; -} - -static int si_dpm_read_sensor(void *handle, int idx, - void *value, int *size) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *rps = &eg_pi->current_rps; - struct si_ps *ps = si_get_ps(rps); - uint32_t sclk, mclk; - u32 pl_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; - - /* size must be at least 4 bytes for all sensors */ - if (*size < 4) - return -EINVAL; - - switch (idx) { - case AMDGPU_PP_SENSOR_GFX_SCLK: - if (pl_index < ps->performance_level_count) { - sclk = ps->performance_levels[pl_index].sclk; - *((uint32_t *)value) = sclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GFX_MCLK: - if (pl_index < ps->performance_level_count) { - mclk = ps->performance_levels[pl_index].mclk; - *((uint32_t *)value) = mclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GPU_TEMP: - *((uint32_t *)value) = si_dpm_get_temp(adev); - *size = 4; - return 0; - default: - return -EINVAL; - } -} - -static const struct amd_ip_funcs si_dpm_ip_funcs = { - .name = "si_dpm", - .early_init = si_dpm_early_init, - .late_init = si_dpm_late_init, - .sw_init = si_dpm_sw_init, - .sw_fini = si_dpm_sw_fini, - .hw_init = si_dpm_hw_init, - .hw_fini = si_dpm_hw_fini, - .suspend = si_dpm_suspend, - .resume = si_dpm_resume, - .is_idle = si_dpm_is_idle, - .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, - .set_clockgating_state = si_dpm_set_clockgating_state, - .set_powergating_state = si_dpm_set_powergating_state, -}; - -const struct amdgpu_ip_block_version si_smu_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 6, - .minor = 0, - .rev = 0, - .funcs = &si_dpm_ip_funcs, -}; - -static const struct amd_pm_funcs si_dpm_funcs = { - .pre_set_power_state = &si_dpm_pre_set_power_state, - .set_power_state = &si_dpm_set_power_state, - .post_set_power_state = &si_dpm_post_set_power_state, - .display_configuration_changed = &si_dpm_display_configuration_changed, - .get_sclk = &si_dpm_get_sclk, - .get_mclk = &si_dpm_get_mclk, - .print_power_state = &si_dpm_print_power_state, - .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, - .force_performance_level = &si_dpm_force_performance_level, - .vblank_too_short = &si_dpm_vblank_too_short, - .set_fan_control_mode = &si_dpm_set_fan_control_mode, - .get_fan_control_mode = &si_dpm_get_fan_control_mode, - .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, - .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, - .check_state_equal = &si_check_state_equal, - .get_vce_clock_state = amdgpu_get_vce_clock_state, - .read_sensor = &si_dpm_read_sensor, -}; - -static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { - .set = si_dpm_set_interrupt_state, - .process = si_dpm_process_interrupt, -}; - -static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) -{ - 
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; - adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; -} - diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h deleted file mode 100644 index 6b7d292b919f..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h +++ /dev/null @@ -1,1015 +0,0 @@ -/* - * Copyright 2012 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SI_DPM_H__ -#define __SI_DPM_H__ - -#include "amdgpu_atombios.h" -#include "sislands_smc.h" - -#define MC_CG_CONFIG 0x96f -#define MC_ARB_CG 0x9fa -#define CG_ARB_REQ(x) ((x) << 0) -#define CG_ARB_REQ_MASK (0xff << 0) - -#define MC_ARB_DRAM_TIMING_1 0x9fc -#define MC_ARB_DRAM_TIMING_2 0x9fd -#define MC_ARB_DRAM_TIMING_3 0x9fe -#define MC_ARB_DRAM_TIMING2_1 0x9ff -#define MC_ARB_DRAM_TIMING2_2 0xa00 -#define MC_ARB_DRAM_TIMING2_3 0xa01 - -#define MAX_NO_OF_MVDD_VALUES 2 -#define MAX_NO_VREG_STEPS 32 -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 -#define RV770_ASI_DFLT 1000 -#define CYPRESS_HASI_DFLT 400000 -#define PCIE_PERF_REQ_PECI_GEN1 2 -#define PCIE_PERF_REQ_PECI_GEN2 3 -#define PCIE_PERF_REQ_PECI_GEN3 4 -#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ -#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ - -#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 - -#define RV770_SMC_TABLE_ADDRESS 0xB000 -#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 - -#define SMC_STROBE_RATIO 0x0F -#define SMC_STROBE_ENABLE 0x10 - -#define SMC_MC_EDC_RD_FLAG 0x01 -#define SMC_MC_EDC_WR_FLAG 0x02 -#define SMC_MC_RTT_ENABLE 0x04 -#define SMC_MC_STUTTER_EN 0x08 - -#define RV770_SMC_VOLTAGEMASK_VDDC 0 -#define RV770_SMC_VOLTAGEMASK_MVDD 1 -#define RV770_SMC_VOLTAGEMASK_VDDCI 2 -#define RV770_SMC_VOLTAGEMASK_MAX 4 - -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define NISLANDS_SMC_STROBE_RATIO 0x0F -#define NISLANDS_SMC_STROBE_ENABLE 0x10 - -#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define NISLANDS_SMC_MC_STUTTER_EN 0x08 - -#define MAX_NO_VREG_STEPS 32 - -#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 - -#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 -#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 
-#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 -#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 - -#define SISLANDS_LEAKAGE_INDEX0 0xff01 -#define SISLANDS_MAX_LEAKAGE_COUNT 4 - -#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5 -#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 -#define SISLANDS_ACPI_STATE_ARB_INDEX 1 -#define SISLANDS_ULV_STATE_ARB_INDEX 2 -#define SISLANDS_DRIVER_STATE_ARB_INDEX 3 - -#define SISLANDS_DPM2_MAX_PULSE_SKIP 256 - -#define SISLANDS_DPM2_NEAR_TDP_DEC 10 -#define SISLANDS_DPM2_ABOVE_SAFE_INC 5 -#define SISLANDS_DPM2_BELOW_SAFE_INC 20 - -#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80 - -#define SISLANDS_DPM2_MAXPS_PERCENT_H 99 -#define SISLANDS_DPM2_MAXPS_PERCENT_M 99 - -#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF -#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12 -#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 -#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E -#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF - -#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10 - -#define SISLANDS_VRC_DFLT 0xC000B3 -#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687 -#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035 -#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550 - -#define SI_ASI_DFLT 10000 -#define SI_BSP_DFLT 0x41EB -#define SI_BSU_DFLT 0x2 -#define SI_AH_DFLT 5 -#define SI_RLP_DFLT 25 -#define SI_RMP_DFLT 65 -#define SI_LHP_DFLT 40 -#define SI_LMP_DFLT 15 -#define SI_TD_DFLT 0 -#define SI_UTC_DFLT_00 0x24 -#define SI_UTC_DFLT_01 0x22 -#define SI_UTC_DFLT_02 0x22 -#define SI_UTC_DFLT_03 0x22 -#define SI_UTC_DFLT_04 0x22 -#define SI_UTC_DFLT_05 0x22 -#define SI_UTC_DFLT_06 0x22 -#define SI_UTC_DFLT_07 0x22 -#define SI_UTC_DFLT_08 0x22 -#define SI_UTC_DFLT_09 0x22 -#define SI_UTC_DFLT_10 0x22 -#define SI_UTC_DFLT_11 0x22 -#define SI_UTC_DFLT_12 0x22 -#define SI_UTC_DFLT_13 0x22 -#define SI_UTC_DFLT_14 0x22 -#define SI_DTC_DFLT_00 0x24 -#define SI_DTC_DFLT_01 0x22 -#define SI_DTC_DFLT_02 0x22 -#define SI_DTC_DFLT_03 0x22 -#define SI_DTC_DFLT_04 0x22 -#define SI_DTC_DFLT_05 0x22 -#define SI_DTC_DFLT_06 0x22 -#define SI_DTC_DFLT_07 0x22 -#define SI_DTC_DFLT_08 0x22 -#define SI_DTC_DFLT_09 0x22 -#define SI_DTC_DFLT_10 0x22 -#define SI_DTC_DFLT_11 0x22 -#define SI_DTC_DFLT_12 0x22 -#define SI_DTC_DFLT_13 0x22 -#define SI_DTC_DFLT_14 0x22 -#define SI_VRC_DFLT 0x0000C003 -#define SI_VOLTAGERESPONSETIME_DFLT 1000 -#define SI_BACKBIASRESPONSETIME_DFLT 1000 -#define SI_VRU_DFLT 0x3 -#define SI_SPLLSTEPTIME_DFLT 0x1000 -#define SI_SPLLSTEPUNIT_DFLT 0x3 -#define SI_TPU_DFLT 0 -#define SI_TPC_DFLT 0x200 -#define SI_SSTU_DFLT 0 -#define SI_SST_DFLT 0x00C8 -#define SI_GICST_DFLT 0x200 -#define SI_FCT_DFLT 0x0400 -#define SI_FCTU_DFLT 0 -#define SI_CTXCGTT3DRPHC_DFLT 0x20 -#define SI_CTXCGTT3DRSDC_DFLT 0x40 -#define SI_VDDC3DOORPHC_DFLT 0x100 -#define SI_VDDC3DOORSDC_DFLT 0x7 -#define SI_VDDC3DOORSU_DFLT 0 -#define SI_MPLLLOCKTIME_DFLT 100 -#define SI_MPLLRESETTIME_DFLT 150 -#define SI_VCOSTEPPCT_DFLT 20 -#define SI_ENDINGVCOSTEPPCT_DFLT 5 -#define SI_REFERENCEDIVIDER_DFLT 4 - -#define SI_PM_NUMBER_OF_TC 15 -#define SI_PM_NUMBER_OF_SCLKS 20 -#define SI_PM_NUMBER_OF_MCLKS 4 -#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4 -#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3 - -/* XXX are these ok? 
*/ -#define SI_TEMP_RANGE_MIN (90 * 1000) -#define SI_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum ni_dc_cac_level -{ - NISLANDS_DCCAC_LEVEL_0 = 0, - NISLANDS_DCCAC_LEVEL_1, - NISLANDS_DCCAC_LEVEL_2, - NISLANDS_DCCAC_LEVEL_3, - NISLANDS_DCCAC_LEVEL_4, - NISLANDS_DCCAC_LEVEL_5, - NISLANDS_DCCAC_LEVEL_6, - NISLANDS_DCCAC_LEVEL_7, - NISLANDS_DCCAC_MAX_LEVELS -}; - -enum si_cac_config_reg_type -{ - SISLANDS_CACCONFIG_MMR = 0, - SISLANDS_CACCONFIG_CGIND, - SISLANDS_CACCONFIG_MAX -}; - -enum si_power_level { - SI_POWER_LEVEL_LOW = 0, - SI_POWER_LEVEL_MEDIUM = 1, - SI_POWER_LEVEL_HIGH = 2, - SI_POWER_LEVEL_CTXSW = 3, -}; - -enum si_td { - SI_TD_AUTO, - SI_TD_UP, - SI_TD_DOWN, -}; - -enum si_display_watermark { - SI_DISPLAY_WATERMARK_LOW = 0, - SI_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum si_display_gap -{ - SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - SI_PM_DISPLAY_GAP_VBLANK = 1, - SI_PM_DISPLAY_GAP_WATERMARK = 2, - SI_PM_DISPLAY_GAP_IGNORE = 3, -}; - -extern const struct amdgpu_ip_block_version si_smu_ip_block; - -struct ni_leakage_coeffients -{ - u32 at; - u32 bt; - u32 av; - u32 bv; - s32 t_slope; - s32 t_intercept; - u32 t_ref; -}; - -struct SMC_Evergreen_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; - -struct evergreen_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct evergreen_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_Evergreen_MCRegisterSet -{ - uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; - -struct SMC_Evergreen_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; - SMC_Evergreen_MCRegisterSet data[5]; -}; - -typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; - -struct SMC_NIslands_MCRegisterSet -{ - uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; - -struct ni_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_NIslands_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; - -struct SMC_NIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; - -struct evergreen_ulv_param { - bool supported; - struct rv7xx_pl *pl; -}; - -struct evergreen_arb_registers { - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 mc_arb_rfsh_rate; - u32 mc_arb_burst_time; -}; - -struct at { - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; -}; - -struct ni_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mpll_ss1; - u32 
mpll_ss2; -}; - -struct RV770_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; - -struct RV770_SMC_MCLK_VALUE -{ - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; - - -struct RV730_SMC_MCLK_VALUE -{ - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL2; - uint32_t vMPLL_FUNC_CNTL3; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; - -struct RV770_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; - -union RV7XX_SMC_MCLK_VALUE -{ - RV770_SMC_MCLK_VALUE mclk770; - RV730_SMC_MCLK_VALUE mclk730; -}; - -typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; - -struct RV770_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - union{ - uint8_t seqValue; - uint8_t ACIndex; - }; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t gen2XSP; - uint8_t backbias; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - RV770_SMC_SCLK_VALUE sclk; - RV7XX_SMC_MCLK_VALUE mclk; - RV770_SMC_VOLTAGE_VALUE vddc; - RV770_SMC_VOLTAGE_VALUE mvdd; - RV770_SMC_VOLTAGE_VALUE vddci; - uint8_t reserved1; - uint8_t reserved2; - uint8_t stateFlags; - uint8_t padding; -}; - -typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; - -struct RV770_SMC_SWSTATE -{ - uint8_t flags; - uint8_t padding1; - uint8_t padding2; - uint8_t padding3; - RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; - -struct RV770_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; - -struct RV770_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[MAX_NO_VREG_STEPS]; - RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; - RV770_SMC_SWSTATE initialState; - RV770_SMC_SWSTATE ACPIState; - RV770_SMC_SWSTATE driverState; - RV770_SMC_SWSTATE ULVState; -}; - -typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; - -struct vddc_table_entry { - u16 vddc; - u8 vddc_index; - u8 high_smio; - u32 low_smio; -}; - -struct rv770_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct rv730_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl2; - u32 
mpll_func_cntl3; - u32 mpll_ss; - u32 mpll_ss2; -}; - -union r7xx_clock_registers { - struct rv770_clock_registers rv770; - struct rv730_clock_registers rv730; -}; - -struct rv7xx_power_info { - /* flags */ - bool mem_gddr5; - bool pcie_gen2; - bool dynamic_pcie_gen2; - bool acpi_pcie_gen2; - bool boot_in_gen2; - bool voltage_control; /* vddc */ - bool mvdd_control; - bool sclk_ss; - bool mclk_ss; - bool dynamic_ss; - bool gfx_clock_gating; - bool mg_clock_gating; - bool mgcgtssm; - bool power_gating; - bool thermal_protection; - bool display_gap; - bool dcodt; - bool ulps; - /* registers */ - union r7xx_clock_registers clk_regs; - u32 s0_vid_lower_smio_cntl; - /* voltage */ - u32 vddc_mask_low; - u32 mvdd_mask_low; - u32 mvdd_split_frequency; - u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; - u16 max_vddc; - u16 max_vddc_in_table; - u16 min_vddc_in_table; - struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; - u8 valid_vddc_entries; - /* dc odt */ - u32 mclk_odt_threshold; - u8 odt_value_0[2]; - u8 odt_value_1[2]; - /* stored values */ - u32 boot_sclk; - u16 acpi_vddc; - u32 ref_div; - u32 active_auto_throttle_sources; - u32 mclk_stutter_mode_threshold; - u32 mclk_strobe_mode_threshold; - u32 mclk_edc_enable_threshold; - u32 bsp; - u32 bsu; - u32 pbsp; - u32 pbsu; - u32 dsp; - u32 psp; - u32 asi; - u32 pasi; - u32 vrc; - u32 restricted_levels; - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; - /* smc offsets */ - u16 state_table_start; - u16 soft_regs_start; - u16 sram_end; - /* scratch structs */ - RV770_SMC_STATETABLE smc_statetable; -}; - -struct rv7xx_pl { - u32 sclk; - u32 mclk; - u16 vddc; - u16 vddci; /* eg+ only */ - u32 flags; - enum amdgpu_pcie_gen pcie_gen; /* si+ only */ -}; - -struct rv7xx_ps { - struct rv7xx_pl high; - struct rv7xx_pl medium; - struct rv7xx_pl low; - bool dc_compatible; -}; - -struct si_ps { - u16 performance_level_count; - bool dc_compatible; - struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -struct ni_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ni_cac_data -{ - struct ni_leakage_coeffients leakage_coefficients; - u32 i_leakage; - s32 leakage_minimum_temperature; - u32 pwr_const; - u32 dc_cac_value; - u32 bif_cac_value; - u32 lkge_pwr; - u8 mc_wr_weight; - u8 mc_rd_weight; - u8 allow_ovrflw; - u8 num_win_tdp; - u8 l2num_win_tdp; - u8 lts_truncate_n; -}; - -struct evergreen_power_info { - /* must be first! 
*/ - struct rv7xx_power_info rv7xx; - /* flags */ - bool vddci_control; - bool dynamic_ac_timing; - bool abm; - bool mcls; - bool light_sleep; - bool memory_transition; - bool pcie_performance_request; - bool pcie_performance_request_registered; - bool sclk_deep_sleep; - bool dll_default_on; - bool ls_clock_gating; - bool smu_uvd_hs; - bool uvd_enabled; - /* stored values */ - u16 acpi_vddci; - u8 mvdd_high_index; - u8 mvdd_low_index; - u32 mclk_edc_wr_enable_threshold; - struct evergreen_mc_reg_table mc_reg_table; - struct atom_voltage_table vddc_voltage_table; - struct atom_voltage_table vddci_voltage_table; - struct evergreen_arb_registers bootup_arb_registers; - struct evergreen_ulv_param ulv; - struct at ats[2]; - /* smc offsets */ - u16 mc_reg_table_start; - struct amdgpu_ps current_rps; - struct rv7xx_ps current_ps; - struct amdgpu_ps requested_rps; - struct rv7xx_ps requested_ps; -}; - -struct PP_NIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSST; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint8_t Reserved[6]; -}; - -typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; - -struct PP_NIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; -}; -typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; - -struct NISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; - -struct NISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; - -struct NISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; - -struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t reserved1; - uint8_t reserved2; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - NISLANDS_SMC_SCLK_VALUE sclk; - NISLANDS_SMC_MCLK_VALUE mclk; - NISLANDS_SMC_VOLTAGE_VALUE vddc; - NISLANDS_SMC_VOLTAGE_VALUE mvdd; - NISLANDS_SMC_VOLTAGE_VALUE vddci; - NISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint32_t powergate_en; - uint8_t hUp; - uint8_t hDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t SQPowerThrottle_2; - uint32_t reserved[2]; - PP_NIslands_Dpm2PerfLevel dpm2; -}; - -typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct NISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; -}; - -typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; - -struct NISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; - uint32_t 
lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; - -#define NISLANDS_MAX_NO_VREG_STEPS 32 - -struct NISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - PP_NIslands_DPM2Parameters dpm2Params; - NISLANDS_SMC_SWSTATE initialState; - NISLANDS_SMC_SWSTATE ACPIState; - NISLANDS_SMC_SWSTATE ULVState; - NISLANDS_SMC_SWSTATE driverState; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; -}; - -typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; - -struct ni_power_info { - /* must be first! */ - struct evergreen_power_info eg; - struct ni_clock_registers clock_registers; - struct ni_mc_reg_table mc_reg_table; - u32 mclk_rtt_mode_threshold; - /* flags */ - bool use_power_boost_limit; - bool support_cac_long_term_average; - bool cac_enabled; - bool cac_configuration_required; - bool driver_calculate_cac_leakage; - bool pc_enabled; - bool enable_power_containment; - bool enable_cac; - bool enable_sq_ramping; - /* smc offsets */ - u16 arb_table_start; - u16 fan_table_start; - u16 cac_table_start; - u16 spll_table_start; - /* CAC stuff */ - struct ni_cac_data cac_data; - u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; - const struct ni_cac_weights *cac_weights; - u8 lta_window_size; - u8 lts_truncate; - struct si_ps current_ps; - struct si_ps requested_ps; - /* scratch structs */ - SMC_NIslands_MCRegisters smc_mc_reg_table; - NISLANDS_SMC_STATETABLE smc_statetable; -}; - -struct si_cac_config_reg -{ - u32 offset; - u32 mask; - u32 shift; - u32 value; - enum si_cac_config_reg_type type; -}; - -struct si_powertune_data -{ - u32 cac_window; - u32 l2_lta_window_size_default; - u8 lts_truncate_default; - u8 shift_n_default; - u8 operating_temp; - struct ni_leakage_coeffients leakage_coefficients; - u32 fixed_kt; - u32 lkge_lut_v0_percent; - u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS]; - bool enable_powertune_by_default; -}; - -struct si_dyn_powertune_data -{ - u32 cac_leakage; - s32 leakage_minimum_temperature; - u32 wintime; - u32 l2_lta_window_size; - u8 lts_truncate; - u8 shift_n; - u8 dc_pwr_value; - bool disable_uvd_powertune; -}; - -struct si_dte_data -{ - u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - u32 k; - u32 t0; - u32 max_t; - u8 window_size; - u8 temp_select; - u8 dte_mode; - u8 tdep_count; - u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 t_threshold; - bool enable_dte_by_default; -}; - -struct si_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 dll_cntl; - u32 mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_dq_func_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl_1; - u32 mpll_func_cntl_2; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct si_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct si_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - 
SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct si_leakage_voltage_entry -{ - u16 voltage; - u16 leakage_index; -}; - -struct si_leakage_voltage -{ - u16 count; - struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; -}; - - -struct si_ulv_param { - bool supported; - u32 cg_ulv_control; - u32 cg_ulv_parameter; - u32 volt_change_delay; - struct rv7xx_pl pl; - bool one_pcie_lane_in_ulv; -}; - -struct si_power_info { - /* must be first! */ - struct ni_power_info ni; - struct si_clock_registers clock_registers; - struct si_mc_reg_table mc_reg_table; - struct atom_voltage_table mvdd_voltage_table; - struct atom_voltage_table vddc_phase_shed_table; - struct si_leakage_voltage leakage_voltage; - u16 mvdd_bootup_value; - struct si_ulv_param ulv; - u32 max_cu; - /* pcie gen */ - enum amdgpu_pcie_gen force_pcie_gen; - enum amdgpu_pcie_gen boot_pcie_gen; - enum amdgpu_pcie_gen acpi_pcie_gen; - u32 sys_pcie_mask; - /* flags */ - bool enable_dte; - bool enable_ppm; - bool vddc_phase_shed_control; - bool pspp_notify_required; - bool sclk_deep_sleep_above_low; - bool voltage_control_svi2; - bool vddci_control_svi2; - /* smc offsets */ - u32 sram_end; - u32 state_table_start; - u32 soft_regs_start; - u32 mc_reg_table_start; - u32 arb_table_start; - u32 cac_table_start; - u32 dte_table_start; - u32 spll_table_start; - u32 papm_cfg_table_start; - u32 fan_table_start; - /* CAC stuff */ - const struct si_cac_config_reg *cac_weights; - const struct si_cac_config_reg *lcac_config; - const struct si_cac_config_reg *cac_override; - const struct si_powertune_data *powertune_data; - struct si_dyn_powertune_data dyn_powertune_data; - /* DTE stuff */ - struct si_dte_data dte_data; - /* scratch structs */ - SMC_SIslands_MCRegisters smc_mc_reg_table; - SISLANDS_SMC_STATETABLE smc_statetable; - PP_SIslands_PAPMParameters papm_parm; - /* SVI2 */ - u8 svd_gpio_id; - u8 svc_gpio_id; - /* fan control */ - bool fan_ctrl_is_in_default_mode; - u32 t_min; - u32 fan_ctrl_default_mode; - bool fan_is_controlled_by_smc; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h index 790ba46eaebb..4e935baa7b91 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h @@ -121,7 +121,6 @@ #define CURSOR_UPDATE_LOCK (1 << 16) #define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24) -#define AMDGPU_NUM_OF_VMIDS 8 #define SI_CRTC0_REGISTER_OFFSET 0 #define SI_CRTC1_REGISTER_OFFSET 0x300 #define SI_CRTC2_REGISTER_OFFSET 0x2600 diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 88ae27a5a03d..9a24f17a5750 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -27,6 +27,8 @@ #include "amdgpu_ih.h" #include "sid.h" #include "si_ih.h" +#include "oss/oss_1_0_d.h" +#include "oss/oss_1_0_sh_mask.h" static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev); @@ -41,7 +43,7 @@ static void si_ih_enable_interrupts(struct amdgpu_device *adev) WREG32(IH_RB_CNTL, ih_rb_cntl); adev->irq.ih.enabled = true; } - + static void si_ih_disable_interrupts(struct amdgpu_device *adev) { u32 ih_rb_cntl = RREG32(IH_RB_CNTL); @@ -173,8 +175,7 @@ static int si_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c deleted 
file mode 100644 index 8f994ffa9cd1..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/si_smc.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Alex Deucher - */ - -#include <linux/firmware.h> - -#include "amdgpu.h" -#include "sid.h" -#include "ppsmc.h" -#include "amdgpu_ucode.h" -#include "sislands_smc.h" - -static int si_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(SMC_IND_INDEX_0, smc_address); - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); - - return 0; -} - -int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit) -{ - unsigned long flags; - int ret = 0; - u32 data, original_data, addr, extra_shift; - - if (smc_start_address & 3) - return -EINVAL; - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(SMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - /* RMW for the final bytes */ - if (byte_count > 0) { - data = 0; - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - original_data = RREG32(SMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* SMC address space is BE */ - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (original_data & ~((~0UL) << extra_shift)); - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(SMC_IND_DATA_0, data); - } - -done: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -void amdgpu_si_start_smc(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); - - tmp &= ~RST_REG; - - WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); -} - -void amdgpu_si_reset_smc(struct amdgpu_device *adev) -{ - u32 tmp; - - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - - tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | - RST_REG; - WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); -} - 
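The deleted helper above, amdgpu_si_copy_bytes_to_smc(), wrote into the SMC's big-endian, word-addressed SRAM: whole words are packed MSB-first, and a trailing 1-3 byte remainder is merged into the existing word with a read-modify-write. A minimal standalone sketch of just that packing logic, assuming nothing from the driver (pack_be32 and merge_tail_be32 are illustrative names, not kernel functions):

#include <stdint.h>

/* SMC address space is BE: the first source byte is the most significant. */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}

/* Merge a 1-3 byte tail into 'original', mirroring the deleted helper's
 * RMW path: the new bytes are left-justified and the untouched low-order
 * bytes of the word already in SRAM are preserved.
 */
static uint32_t merge_tail_be32(uint32_t original, const uint8_t *src,
				unsigned int byte_count)
{
	unsigned int extra_shift = 8 * (4 - byte_count);
	uint32_t data = 0;

	while (byte_count--)
		data = (data << 8) + *src++;

	return (data << extra_shift) | (original & ~(~0u << extra_shift));
}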
-int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev) -{ - static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; - - return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); -} - -void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - - if (enable) - tmp &= ~CK_DISABLE; - else - tmp |= CK_DISABLE; - - WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); -} - -bool amdgpu_si_is_smc_running(struct amdgpu_device *adev) -{ - u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL); - u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - - if (!(rst & RST_REG) && !(clk & CK_DISABLE)) - return true; - - return false; -} - -PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - u32 tmp; - int i; - - if (!amdgpu_si_is_smc_running(adev)) - return PPSMC_Result_Failed; - - WREG32(SMC_MESSAGE_0, msg); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SMC_RESP_0); - if (tmp != 0) - break; - udelay(1); - } - - return (PPSMC_Result)RREG32(SMC_RESP_0); -} - -PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - u32 tmp; - int i; - - if (!amdgpu_si_is_smc_running(adev)) - return PPSMC_Result_OK; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - if ((tmp & CKEN) == 0) - break; - udelay(1); - } - - return PPSMC_Result_OK; -} - -int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) -{ - const struct smc_firmware_header_v1_0 *hdr; - unsigned long flags; - u32 ucode_start_address; - u32 ucode_size; - const u8 *src; - u32 data; - - if (!adev->pm.fw) - return -EINVAL; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - src = (const u8 *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - if (ucode_size & 3) - return -EINVAL; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, ucode_start_address); - WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); - while (ucode_size >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - WREG32(SMC_IND_DATA_0, data); - - src += 4; - ucode_size -= 4; - } - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = si_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - *value = RREG32(SMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = si_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - WREG32(SMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h index 7cf12adb3915..9a39cbfe6db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sid.h +++ b/drivers/gpu/drm/amd/amdgpu/sid.h @@ 
-47,8 +47,7 @@ #define SI_MAX_LDS_NUM 0xFFFF #define SI_MAX_TCC 16 #define SI_MAX_TCC_MASK 0xFFFF - -#define AMDGPU_NUM_OF_VMIDS 8 +#define SI_MAX_CTLACKS_ASSERTION_WAIT 100 /* SMC IND accessor regs */ #define SMC_IND_INDEX_0 0x80 @@ -1646,9 +1645,10 @@ /* * PM4 */ -#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ - (((reg) >> 2) & 0xFFFF) | \ - ((n) & 0x3FFF) << 16) +#define PACKET_TYPE0 0 +#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ + ((reg) & 0xFFFF) | \ + ((n) & 0x3FFF) << 16) #define CP_PACKET2 0x80000000 #define PACKET2_PAD_SHIFT 0 #define PACKET2_PAD_MASK (0x3fffffff << 0) @@ -2340,11 +2340,6 @@ # define NI_INPUT_GAMMA_XVYCC_222 3 # define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4) -#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x1 -#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000 -#define SRBM_STATUS__IH_BUSY_MASK 0x20000 -#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK 0x400 - #define BLACKOUT_MODE_MASK 0x00000007 #define VGA_RENDER_CONTROL 0xC0 #define R_000300_VGA_RENDER_CONTROL 0xC0 @@ -2431,18 +2426,6 @@ #define MC_SEQ_MISC0__MT__HBM 0x60000000 #define MC_SEQ_MISC0__MT__DDR3 0xB0000000 -#define SRBM_STATUS__MCB_BUSY_MASK 0x200 -#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9 -#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400 -#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa -#define SRBM_STATUS__MCC_BUSY_MASK 0x800 -#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb -#define SRBM_STATUS__MCD_BUSY_MASK 0x1000 -#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc -#define SRBM_STATUS__VMC_BUSY_MASK 0x100 -#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8 - - #define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000 #define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000 #define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000 @@ -2467,8 +2450,6 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000 -#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c #define PCIE_PORT_INDEX 0xe #define PCIE_PORT_DATA 0xf #define EVERGREEN_PIF_PHY0_INDEX 0x8 @@ -2478,4 +2459,36 @@ #define MC_VM_FB_OFFSET 0x81a +/* Discrete VCE clocks */ +#define CG_VCEPLL_FUNC_CNTL 0xc0030600 +#define VCEPLL_RESET_MASK 0x00000001 +#define VCEPLL_SLEEP_MASK 0x00000002 +#define VCEPLL_BYPASS_EN_MASK 0x00000004 +#define VCEPLL_CTLREQ_MASK 0x00000008 +#define VCEPLL_VCO_MODE_MASK 0x00000600 +#define VCEPLL_REF_DIV_MASK 0x003F0000 +#define VCEPLL_CTLACK_MASK 0x40000000 +#define VCEPLL_CTLACK2_MASK 0x80000000 + +#define CG_VCEPLL_FUNC_CNTL_2 0xc0030601 +#define VCEPLL_PDIV_A(x) ((x) << 0) +#define VCEPLL_PDIV_A_MASK 0x0000007F +#define VCEPLL_PDIV_B(x) ((x) << 8) +#define VCEPLL_PDIV_B_MASK 0x00007F00 +#define EVCLK_SRC_SEL(x) ((x) << 20) +#define EVCLK_SRC_SEL_MASK 0x01F00000 +#define ECCLK_SRC_SEL(x) ((x) << 25) +#define ECCLK_SRC_SEL_MASK 0x3E000000 + +#define CG_VCEPLL_FUNC_CNTL_3 0xc0030602 +#define VCEPLL_FB_DIV(x) ((x) << 0) +#define VCEPLL_FB_DIV_MASK 0x01FFFFFF + +#define CG_VCEPLL_FUNC_CNTL_4 0xc0030603 + +#define CG_VCEPLL_FUNC_CNTL_5 0xc0030604 +#define CG_VCEPLL_SPREAD_SPECTRUM 0xc0030606 +#define VCEPLL_SSEN_MASK 0x00000001 + + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h deleted file mode 100644 index d2930eceaf3c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef PP_SISLANDS_SMC_H -#define PP_SISLANDS_SMC_H - -#include "ppsmc.h" - -#pragma pack(push, 1) - -#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 - -struct PP_SIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSSamplingTime; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint16_t PwrEfficiencyRatio; - uint8_t Reserved[4]; -}; - -typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel; - -struct PP_SIslands_DPM2Status -{ - uint32_t dpm2Flags; - uint8_t CurrPSkip; - uint8_t CurrPSkipPowerShift; - uint8_t CurrPSkipTDP; - uint8_t CurrPSkipOCP; - uint8_t MaxSPLLIndex; - uint8_t MinSPLLIndex; - uint8_t CurrSPLLIndex; - uint8_t InfSweepMode; - uint8_t InfSweepDir; - uint8_t TDPexceeded; - uint8_t reserved; - uint8_t SwitchDownThreshold; - uint32_t SwitchDownCounter; - uint32_t SysScalingFactor; -}; - -typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status; - -struct PP_SIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; - uint32_t MinLimitDelta; -}; -typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters; - -struct PP_SIslands_PAPMStatus -{ - uint32_t EstimatedDGPU_T; - uint32_t EstimatedDGPU_P; - uint32_t EstimatedAPU_T; - uint32_t EstimatedAPU_P; - uint8_t dGPU_T_Limit_Exceeded; - uint8_t reserved[3]; -}; -typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; - -struct PP_SIslands_PAPMParameters -{ - uint32_t NearTDPLimitTherm; - uint32_t NearTDPLimitPAPM; - uint32_t PlatformPowerLimit; - uint32_t dGPU_T_Limit; - uint32_t dGPU_T_Warning; - uint32_t dGPU_T_Hysteresis; -}; -typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; - -struct SISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; - -struct SISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t 
vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; - -struct SISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t phase_settings; -}; - -typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; - -struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t UVDWatermark; - uint8_t VCEWatermark; - uint8_t strobeMode; - uint8_t mcFlags; - uint8_t padding; - uint32_t aT; - uint32_t bSP; - SISLANDS_SMC_SCLK_VALUE sclk; - SISLANDS_SMC_MCLK_VALUE mclk; - SISLANDS_SMC_VOLTAGE_VALUE vddc; - SISLANDS_SMC_VOLTAGE_VALUE mvdd; - SISLANDS_SMC_VOLTAGE_VALUE vddci; - SISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint8_t hysteresisUp; - uint8_t hysteresisDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t SQPowerThrottle_2; - uint32_t MaxPoweredUpCU; - SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc; - SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc; - uint32_t reserved[2]; - PP_SIslands_Dpm2PerfLevel dpm2; -}; - -#define SISLANDS_SMC_STROBE_RATIO 0x0F -#define SISLANDS_SMC_STROBE_ENABLE 0x10 - -#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define SISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define SISLANDS_SMC_MC_STUTTER_EN 0x08 -#define SISLANDS_SMC_MC_PG_EN 0x10 - -typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct SISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; -}; - -typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; - -#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 -#define SISLANDS_SMC_VOLTAGEMASK_MAX 4 - -struct SISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; - -#define SISLANDS_MAX_NO_VREG_STEPS 32 - -struct SISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; - SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; - PP_SIslands_DPM2Parameters dpm2Params; - SISLANDS_SMC_SWSTATE initialState; - SISLANDS_SMC_SWSTATE ACPIState; - SISLANDS_SMC_SWSTATE ULVState; - SISLANDS_SMC_SWSTATE driverState; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; -}; - -typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; - -#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0 -#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC -#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28 -#define SI_SMC_SOFT_REGISTER_seq_index 0x5C -#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60 -#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70 -#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78 -#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88 -#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C -#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98 -#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8 -#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4 -#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8 -#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC -#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 -#define 
SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC -#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 -#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118 -#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c -#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 - -struct PP_SIslands_FanTable -{ - uint8_t fdo_mode; - uint8_t padding; - int16_t temp_min; - int16_t temp_med; - int16_t temp_max; - int16_t slope1; - int16_t slope2; - int16_t fdo_min; - int16_t hys_up; - int16_t hys_down; - int16_t hys_slope; - int16_t temp_resp_lim; - int16_t temp_curr; - int16_t slope_curr; - int16_t pwm_curr; - uint32_t refresh_period; - int16_t fdo_max; - uint8_t temp_src; - int8_t padding2; -}; - -typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; - -#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 -#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 - -#define SMC_SISLANDS_SCALE_I 7 -#define SMC_SISLANDS_SCALE_R 12 - -struct PP_SIslands_CacConfig -{ - uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; - uint32_t lkge_lut_V0; - uint32_t lkge_lut_Vstep; - uint32_t WinTime; - uint32_t R_LL; - uint32_t calculation_repeats; - uint32_t l2numWin_TDP; - uint32_t dc_cac; - uint8_t lts_truncate_n; - uint8_t SHIFT_N; - uint8_t log2_PG_LKG_SCALE; - uint8_t cac_temp; - uint32_t lkge_lut_T0; - uint32_t lkge_lut_Tstep; -}; - -typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; - -#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 -#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 - -struct SMC_SIslands_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; - -struct SMC_SIslands_MCRegisterSet -{ - uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; - -struct SMC_SIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; - -struct SMC_SIslands_MCArbDramTimingRegisterSet -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint8_t mc_arb_rfsh_rate; - uint8_t mc_arb_burst_time; - uint8_t padding[2]; -}; - -typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; - -struct SMC_SIslands_MCArbDramTimingRegisters -{ - uint8_t arb_current; - uint8_t reserved[3]; - SMC_SIslands_MCArbDramTimingRegisterSet data[16]; -}; - -typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; - -struct SMC_SISLANDS_SPLL_DIV_TABLE -{ - uint32_t freq[256]; - uint32_t ss[256]; -}; - -#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff -#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0 -#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000 -#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20 - -typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; - -#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5 - -#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 - -struct Smc_SIslands_DTE_Configuration -{ - uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; 
- uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - uint32_t K; - uint32_t T0; - uint32_t MaxT; - uint8_t WindowSize; - uint8_t Tdep_count; - uint8_t temp_select; - uint8_t DTE_mode; - uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tthreshold; -}; - -typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; - -#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1 - -#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000 - -#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0 -#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4 -#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC -#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10 -#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14 -#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18 -#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24 -#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30 -#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38 -#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40 -#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48 - -#pragma pack(pop) - -int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit); -void amdgpu_si_start_smc(struct amdgpu_device *adev); -void amdgpu_si_reset_smc(struct amdgpu_device *adev); -int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev); -void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable); -bool amdgpu_si_is_smc_running(struct amdgpu_device *adev); -PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); -PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev); -int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit); -int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit); -int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit); - -#endif - diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index c44723c267c9..73ffa8fde3df 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -32,7 +32,6 @@ #include "amdgpu_amdkfd.h" #include <linux/i2c.h> #include <linux/pci.h> -#include "amdgpu_ras.h" /* error codes */ #define I2C_OK 0 @@ -42,12 +41,9 @@ #define I2C_SW_TIMEOUT 8 #define I2C_ABORT 0x10 -/* I2C transaction flags */ -#define I2C_NO_STOP 1 -#define I2C_RESTART 2 +#define I2C_X_RESTART BIT(31) -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev -#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor) +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) { @@ -58,12 +54,48 @@ static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) WREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT, reg); } +/* The T_I2C_POLL_US is defined as follows: + * + * "Define a timer interval (t_i2c_poll) equal to 10 times the + * signalling period for the highest I2C transfer speed used in the + * system and supported by DW_apb_i2c. For instance, if the highest + * I2C data transfer mode is 400 kb/s, then t_i2c_poll is 25 us." 
-- + * DesignWare DW_apb_i2c Databook, Version 1.21a, section 3.8.3.1, + * page 56, with grammar and syntax corrections. + * + * Vcc for our device is at 1.8V which puts it at 400 kHz, + * see Atmel AT24CM02 datasheet, section 8.3 DC Characteristics table, page 14. + * + * The procedure to disable the IP block is described in section + * 3.8.3 Disabling DW_apb_i2c on page 56. + */ +#define I2C_SPEED_MODE_FAST 2 +#define T_I2C_POLL_US 25 +#define I2C_MAX_T_POLL_COUNT 1000 -static void smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable) +static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable) { struct amdgpu_device *adev = to_amdgpu_device(control); WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0); + + if (!enable) { + int ii; + + for (ii = I2C_MAX_T_POLL_COUNT; ii > 0; ii--) { + u32 en_stat = RREG32_SOC15(SMUIO, + 0, + mmCKSVII2C_IC_ENABLE_STATUS); + if (REG_GET_FIELD(en_stat, CKSVII2C_IC_ENABLE_STATUS, IC_EN)) + udelay(T_I2C_POLL_US); + else + return I2C_OK; + } + + return I2C_ABORT; + } + + return I2C_OK; } static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control) @@ -85,8 +117,13 @@ static void smu_v11_0_i2c_configure(struct i2c_adapter *control) reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_RESTART_EN, 1); reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_MASTER, 0); reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_SLAVE, 0); - /* Standard mode */ - reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MAX_SPEED_MODE, 2); + /* The values of IC_MAX_SPEED_MODE are, + * 1: standard mode, 0 - 100 Kb/s, + * 2: fast mode, <= 400 Kb/s, or fast mode plus, <= 1000 Kb/s, + * 3: high speed mode, <= 3.4 Mb/s. + */ + reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MAX_SPEED_MODE, + I2C_SPEED_MODE_FAST); reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MASTER_MODE, 1); WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CON, reg); @@ -115,13 +152,15 @@ static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control) WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_SDA_HOLD, 20); } -static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, uint8_t address) +static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address) { struct amdgpu_device *adev = to_amdgpu_device(control); - /* Convert fromr 8-bit to 7-bit address */ - address >>= 1; - WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TAR, (address & 0xFF)); + /* The IC_TAR::IC_TAR field is 10-bits wide. + * It takes a 7-bit or 10-bit addresses as an address, + * i.e. no read/write bit--no wire format, just the address. + */ + WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TAR, address & 0x3FF); } static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control) @@ -208,12 +247,10 @@ static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control) return ret; } - - - /** * smu_v11_0_i2c_transmit - Send a block of data over the I2C bus to a slave device. * + * @control: I2C adapter reference * @address: The I2C address of the slave device. * @data: The data to transmit over the bus. * @numbytes: The amount of data to transmit. @@ -222,19 +259,19 @@ static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control) * Returns 0 on success or error. 
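+ * + * The flags are honoured per byte: I2C_X_RESTART makes the first byte + * written to IC_DATA_CMD carry the RESTART bit, and I2C_M_STOP makes + * the final byte carry the STOP bit so the bus is released after the + * transfer.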
*/ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control, - uint8_t address, uint8_t *data, - uint32_t numbytes, uint32_t i2c_flag) + u16 address, u8 *data, + u32 numbytes, u32 i2c_flag) { struct amdgpu_device *adev = to_amdgpu_device(control); - uint32_t bytes_sent, reg, ret = 0; + u32 bytes_sent, reg, ret = I2C_OK; unsigned long timeout_counter; bytes_sent = 0; DRM_DEBUG_DRIVER("I2C_Transmit(), address = %x, bytes = %d , data: ", - (uint16_t)address, numbytes); + address, numbytes); - if (drm_debug & DRM_UT_DRIVER) { + if (drm_debug_enabled(DRM_UT_DRIVER)) { print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE, 16, 1, data, numbytes, false); } @@ -247,53 +284,49 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control, /* Clear status bits */ smu_v11_0_i2c_clear_status(control); - timeout_counter = jiffies + msecs_to_jiffies(20); while (numbytes > 0) { reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS); - if (REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)) { - do { - reg = 0; - /* - * Prepare transaction, no need to set RESTART. I2C engine will send - * START as soon as it sees data in TXFIFO - */ - if (bytes_sent == 0) - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART, - (i2c_flag & I2C_RESTART) ? 1 : 0); - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, data[bytes_sent]); - - /* determine if we need to send STOP bit or not */ - if (numbytes == 1) - /* Final transaction, so send stop unless I2C_NO_STOP */ - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP, - (i2c_flag & I2C_NO_STOP) ? 0 : 1); - /* Write */ - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 0); - WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg); - - /* Record that the bytes were transmitted */ - bytes_sent++; - numbytes--; - - reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS); - - } while (numbytes && REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)); - } - - /* - * We waited too long for the transmission FIFO to become not-full. - * Exit the loop with error. - */ - if (time_after(jiffies, timeout_counter)) { - ret |= I2C_SW_TIMEOUT; - goto Err; + if (!REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)) { + /* + * We waited for too long for the transmission + * FIFO to become not-full. Exit the loop + * with error. + */ + if (time_after(jiffies, timeout_counter)) { + ret |= I2C_SW_TIMEOUT; + goto Err; + } + } else { + reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, + data[bytes_sent]); + + /* Final message, final byte, must generate a + * STOP to release the bus, i.e. don't hold + * SCL low. + */ + if (numbytes == 1 && i2c_flag & I2C_M_STOP) + reg = REG_SET_FIELD(reg, + CKSVII2C_IC_DATA_CMD, + STOP, 1); + + if (bytes_sent == 0 && i2c_flag & I2C_X_RESTART) + reg = REG_SET_FIELD(reg, + CKSVII2C_IC_DATA_CMD, + RESTART, 1); + + /* Write */ + reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 0); + WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg); + + /* Record that the bytes were transmitted */ + bytes_sent++; + numbytes--; } } ret = smu_v11_0_i2c_poll_tx_status(control); - Err: /* Any error, no point in proceeding */ if (ret != I2C_OK) { @@ -315,15 +348,17 @@ Err: /** * smu_v11_0_i2c_receive - Receive a block of data over the I2C bus from a slave device. * + * @control: I2C adapter reference * @address: The I2C address of the slave device. + * @data: Placeholder to store received data. * @numbytes: The amount of data to transmit. * @i2c_flag: Flags for transmission * * Returns 0 on success or error. 
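+ * + * With the DW_apb_i2c, receiving is also command-driven: a read command + * (CMD == 1) is queued into IC_DATA_CMD for every byte expected, with + * RESTART/STOP applied to the first/last command just as for transmit.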
*/ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control, - uint8_t address, uint8_t *data, - uint32_t numbytes, uint8_t i2c_flag) + u16 address, u8 *data, + u32 numbytes, u32 i2c_flag) { struct amdgpu_device *adev = to_amdgpu_device(control); uint32_t bytes_received, ret = I2C_OK; @@ -341,23 +376,21 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control, smu_v11_0_i2c_clear_status(control); - /* Prepare transaction */ - - /* Each time we disable I2C, so this is not a restart */ - if (bytes_received == 0) - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART, - (i2c_flag & I2C_RESTART) ? 1 : 0); - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, 0); /* Read */ reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 1); - /* Transmitting last byte */ - if (numbytes == 1) - /* Final transaction, so send stop if requested */ - reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP, - (i2c_flag & I2C_NO_STOP) ? 0 : 1); + /* Final message, final byte, must generate a STOP + * to release the bus, i.e. don't hold SCL low. + */ + if (numbytes == 1 && i2c_flag & I2C_M_STOP) + reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, + STOP, 1); + + if (bytes_received == 0 && i2c_flag & I2C_X_RESTART) + reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, + RESTART, 1); WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg); @@ -388,7 +421,7 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control, DRM_DEBUG_DRIVER("I2C_Receive(), address = %x, bytes = %d, data :", (uint16_t)address, bytes_received); - if (drm_debug & DRM_UT_DRIVER) { + if (drm_debug_enabled(DRM_UT_DRIVER)) { print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE, 16, 1, data, bytes_received, false); } @@ -412,7 +445,6 @@ static void smu_v11_0_i2c_abort(struct i2c_adapter *control) DRM_DEBUG_DRIVER("I2C_Abort() Done."); } - static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); @@ -424,7 +456,6 @@ static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control) reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS); reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE); - if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) && (REG_GET_FIELD(reg_ic_enable_status, CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) { /* @@ -454,6 +485,8 @@ static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control) static void smu_v11_0_i2c_init(struct i2c_adapter *control) { + int res; + /* Disable clock gating */ smu_v11_0_i2c_set_clock_gating(control, false); @@ -461,7 +494,9 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control) DRM_WARN("I2C busy !"); /* Disable I2C */ - smu_v11_0_i2c_enable(control, false); + res = smu_v11_0_i2c_enable(control, false); + if (res != I2C_OK) + smu_v11_0_i2c_abort(control); /* Configure I2C to operate as master and in standard mode */ smu_v11_0_i2c_configure(control); @@ -474,21 +509,22 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control) static void smu_v11_0_i2c_fini(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); - uint32_t reg_ic_enable_status, reg_ic_enable; - - smu_v11_0_i2c_enable(control, false); + u32 status, enable, en_stat; + int res; - /* Double check if disabled, else force abort */ - reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS); - reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE); + res = smu_v11_0_i2c_enable(control, false); + if (res != I2C_OK) { + status = 
RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS); + enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE); + en_stat = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS); - if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) && - (REG_GET_FIELD(reg_ic_enable_status, - CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) { - /* - * Nobody is using I2C engine, but engine remains active because - * someone missed to send STOP + /* Nobody is using the I2C engine, yet it remains + * active, possibly because someone failed to send + * STOP. */ + DRM_DEBUG_DRIVER("Aborting from fini: status:0x%08x " + "enable:0x%08x enable_stat:0x%08x", + status, enable, en_stat); smu_v11_0_i2c_abort(control); } @@ -510,14 +546,9 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control) struct amdgpu_device *adev = to_amdgpu_device(control); /* Send PPSMC_MSG_RequestI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access) - goto Fail; - - - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, true)) + if (!amdgpu_dpm_smu_i2c_bus_access(adev, true)) return true; -Fail: return false; } @@ -525,81 +556,51 @@ static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); - /* Send PPSMC_MSG_RequestI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access) - goto Fail; - /* Send PPSMC_MSG_ReleaseI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, - false)) + if (!amdgpu_dpm_smu_i2c_bus_access(adev, false)) return true; -Fail: return false; } -/***************************** EEPROM I2C GLUE ****************************/ +/***************************** I2C GLUE ****************************/ -static uint32_t smu_v11_0_i2c_eeprom_read_data(struct i2c_adapter *control, - uint8_t address, - uint8_t *data, - uint32_t numbytes) +static uint32_t smu_v11_0_i2c_read_data(struct i2c_adapter *control, + struct i2c_msg *msg, uint32_t i2c_flag) { - uint32_t ret = 0; - - /* First 2 bytes are dummy write to set EEPROM address */ - ret = smu_v11_0_i2c_transmit(control, address, data, 2, I2C_NO_STOP); - if (ret != I2C_OK) - goto Fail; + uint32_t ret; - /* Now read data starting with that address */ - ret = smu_v11_0_i2c_receive(control, address, data + 2, numbytes - 2, - I2C_RESTART); + ret = smu_v11_0_i2c_receive(control, msg->addr, msg->buf, msg->len, i2c_flag); -Fail: if (ret != I2C_OK) DRM_ERROR("ReadData() - I2C error occurred :%x", ret); return ret; } -static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control, - uint8_t address, - uint8_t *data, - uint32_t numbytes) +static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control, + struct i2c_msg *msg, uint32_t i2c_flag) { uint32_t ret; - ret = smu_v11_0_i2c_transmit(control, address, data, numbytes, 0); + ret = smu_v11_0_i2c_transmit(control, msg->addr, msg->buf, msg->len, i2c_flag); if (ret != I2C_OK) DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret); - else - /* - * According to EEPROM spec there is a MAX of 10 ms required for - * EEPROM to flush internal RX buffer after STOP was issued at the - * end of write transaction. During this time the EEPROM will not be - * responsive to any more commands - so wait a bit more. - * - * TODO Improve to wait for first ACK for slave address after - * internal write cycle done.
- */ - msleep(10); - + return ret; } static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); - if (!smu_v11_0_i2c_bus_lock(i2c)) { + mutex_lock(&adev->pm.smu_i2c_mutex); + if (!smu_v11_0_i2c_bus_lock(i2c)) DRM_ERROR("Failed to lock the bus from SMU"); - return; - } - - control->bus_locked = true; + else + adev->pm.bus_locked = true; } static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) @@ -610,14 +611,13 @@ static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c); + struct amdgpu_device *adev = to_amdgpu_device(i2c); - if (!smu_v11_0_i2c_bus_unlock(i2c)) { + if (!smu_v11_0_i2c_bus_unlock(i2c)) DRM_ERROR("Failed to unlock the bus from SMU"); - return; - } - - control->bus_locked = false; + else + adev->pm.bus_locked = false; + mutex_unlock(&adev->pm.smu_i2c_mutex); } static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = { @@ -626,28 +626,61 @@ static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = { .unlock_bus = unlock_bus, }; -static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, - struct i2c_msg *msgs, int num) +static int smu_v11_0_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num) { int i, ret; - struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap); - - if (!control->bus_locked) { - DRM_ERROR("I2C bus unlocked, stopping transaction!"); - return -EIO; - } + u16 addr, dir; smu_v11_0_i2c_init(i2c_adap); + /* From the client's point of view, this sequence of + * messages (the array i2c_msg *msg) is a single transaction + * on the bus, starting with START and ending with STOP. + * + * The client is welcome to send any sequence of messages in + * this array, as processing under this function here is + * striving to be agnostic. + * + * Record the first address and direction we see. If either + * changes for a subsequent message, generate ReSTART. The + * DW_apb_i2c databook, v1.21a, specifies that ReSTART is + * generated when the direction changes, with the default IP + * block parameter settings, but it doesn't specify if ReSTART + * is generated when the address changes (possibly...). We + * don't rely on the default IP block parameter settings as + * the block is shared and they may change. + */ + if (num > 0) { + addr = msg[0].addr; + dir = msg[0].flags & I2C_M_RD; + } + for (i = 0; i < num; i++) { + u32 i2c_flag = 0; + + if (msg[i].addr != addr || (msg[i].flags ^ dir) & I2C_M_RD) { + addr = msg[i].addr; + dir = msg[i].flags & I2C_M_RD; + i2c_flag |= I2C_X_RESTART; + } + + if (i == num - 1) { + /* Set the STOP bit on the last message, so + * that the IP block generates a STOP after + * the last byte of the message.
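+ * Without it the engine would hold SCL low after + * the last byte, keeping the bus busy for any + * other master.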
+ */ + i2c_flag |= I2C_M_STOP; + } + + if (msg[i].flags & I2C_M_RD) + ret = smu_v11_0_i2c_read_data(i2c_adap, + msg + i, + i2c_flag); else - ret = smu_v11_0_i2c_eeprom_write_data(i2c_adap, - (uint8_t)msgs[i].addr, - msgs[i].buf, msgs[i].len); + ret = smu_v11_0_i2c_write_data(i2c_adap, + msg + i, + i2c_flag); if (ret != I2C_OK) { num = -EIO; @@ -659,28 +692,33 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, return num; } -static u32 smu_v11_0_i2c_eeprom_i2c_func(struct i2c_adapter *adap) +static u32 smu_v11_0_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } +static const struct i2c_algorithm smu_v11_0_i2c_algo = { + .master_xfer = smu_v11_0_i2c_xfer, + .functionality = smu_v11_0_i2c_func, +}; -static const struct i2c_algorithm smu_v11_0_i2c_eeprom_i2c_algo = { - .master_xfer = smu_v11_0_i2c_eeprom_i2c_xfer, - .functionality = smu_v11_0_i2c_eeprom_i2c_func, +static const struct i2c_adapter_quirks smu_v11_0_i2c_control_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN, }; -int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control) +int smu_v11_0_i2c_control_init(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); int res; + mutex_init(&adev->pm.smu_i2c_mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; + control->class = I2C_CLASS_HWMON; control->dev.parent = &adev->pdev->dev; - control->algo = &smu_v11_0_i2c_eeprom_i2c_algo; - snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + control->algo = &smu_v11_0_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops; + control->quirks = &smu_v11_0_i2c_control_quirks; res = i2c_add_adapter(control); if (res) @@ -689,7 +727,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control) return res; } -void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control) +void smu_v11_0_i2c_control_fini(struct i2c_adapter *control) { i2c_del_adapter(control); } @@ -717,9 +755,9 @@ bool smu_v11_0_i2c_test_bus(struct i2c_adapter *control) smu_v11_0_i2c_init(control); /* Write 0xde to address 0x0000 on the EEPROM */ - ret = smu_v11_0_i2c_eeprom_write_data(control, I2C_TARGET_ADDR, data, 6); + ret = smu_v11_0_i2c_write_data(control, I2C_TARGET_ADDR, data, 6); - ret = smu_v11_0_i2c_eeprom_read_data(control, I2C_TARGET_ADDR, data, 6); + ret = smu_v11_0_i2c_read_data(control, I2C_TARGET_ADDR, data, 6); smu_v11_0_i2c_fini(control); diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h index 973f28d68e70..44467c05f642 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h @@ -28,7 +28,7 @@ struct i2c_adapter; -int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control); -void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control); +int smu_v11_0_i2c_control_init(struct i2c_adapter *control); +void smu_v11_0_i2c_control_fini(struct i2c_adapter *control); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c new file mode 100644 index 000000000000..b6f1322f908c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c @@ -0,0 +1,80 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smuio_v11_0.h" +#include "smuio/smuio_11_0_0_offset.h" +#include "smuio/smuio_11_0_0_sh_mask.h" + +static u32 smuio_v11_0_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); +} + +static u32 smuio_v11_0_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); +} + +static void smuio_v11_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable) +{ + u32 def, data; + + /* enable/disable ROM CG is not supported on APU */ + if (adev->flags & AMD_IS_APU) + return; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) + return; + + def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + + if (enable) + data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); + else + data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; + + if (def != data) + WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data); +} + +static void smuio_v11_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags) +{ + u32 data; + + /* CGTT_ROM_CLK_CTRL0 is not available for APU */ + if (adev->flags & AMD_IS_APU) + return; + + data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_ROM_MGCG; +} + +const struct amdgpu_smuio_funcs smuio_v11_0_funcs = { + .get_rom_index_offset = smuio_v11_0_get_rom_index_offset, + .get_rom_data_offset = smuio_v11_0_get_rom_data_offset, + .update_rom_clock_gating = smuio_v11_0_update_rom_clock_gating, + .get_clock_gating_state = smuio_v11_0_get_clock_gating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.h new file mode 100644 index 000000000000..43c4262f2b8b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V11_0_H__ +#define __SMUIO_V11_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v11_0_funcs; + +#endif /* __SMUIO_V11_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c new file mode 100644 index 000000000000..3a18dbb55c32 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "smuio_v11_0_6.h" +#include "smuio/smuio_11_0_6_offset.h" +#include "smuio/smuio_11_0_6_sh_mask.h" + +static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); +} + +static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); +} + +static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable) +{ + u32 def, data; + + /* enable/disable ROM CG is not supported on APU */ + if (adev->flags & AMD_IS_APU) + return; + + def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) + data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); + else + data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; + + if (def != data) + WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data); +} + +static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags) +{ + u32 data; + + /* CGTT_ROM_CLK_CTRL0 is not available for APU */ + if (adev->flags & AMD_IS_APU) + return; + + data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_ROM_MGCG; +} + +const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = { + .get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset, + .get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset, + .update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating, + .get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h new file mode 100644 index 000000000000..3c3f4ab0bc9b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V11_0_6_H__ +#define __SMUIO_V11_0_6_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs; + +#endif /* __SMUIO_V11_0_6_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c new file mode 100644 index 000000000000..39b7c206770f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c @@ -0,0 +1,139 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v13_0.h"
+#include "smuio/smuio_13_0_2_offset.h"
+#include "smuio/smuio_13_0_2_sh_mask.h"
+
+#define SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK	0x00000001L
+
+static u32 smuio_v13_0_get_rom_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
+}
+
+static u32 smuio_v13_0_get_rom_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
+}
+
+static void smuio_v13_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+	u32 def, data;
+
+	/* enable/disable ROM CG is not supported on APU */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	def = data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+	else
+		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+	if (def != data)
+		WREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+	u32 data;
+
+	/* CGTT_ROM_CLK_CTRL0 is not available for APU */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+/**
+ * smuio_v13_0_get_die_id - query die id from FCH.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns die id
+ */
+static u32 smuio_v13_0_get_die_id(struct amdgpu_device *adev)
+{
+	u32 data, die_id;
+
+	data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+	die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
+
+	return die_id;
+}
+
+/**
+ * smuio_v13_0_get_socket_id - query socket id from FCH.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns socket id
+ */
+static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev)
+{
+	u32 data, socket_id;
+
+	data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+	socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID);
+
+	return socket_id;
+}
+
+/**
+ * smuio_v13_0_is_host_gpu_xgmi_supported - detect the XGMI interface between the CPU and GPU(s).
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the host-GPU interface is XGMI, false otherwise.
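+ *
+ * A minimal usage sketch via the smuio callback table (hypothetical caller,
+ * not part of this patch):
+ *
+ *   if (adev->smuio.funcs->is_host_gpu_xgmi_supported &&
+ *       adev->smuio.funcs->is_host_gpu_xgmi_supported(adev))
+ *           adev->gmc.xgmi.connected_to_cpu = true;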
+ */
+static bool smuio_v13_0_is_host_gpu_xgmi_supported(struct amdgpu_device *adev)
+{
+	u32 data;
+
+	data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+	data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, TOPOLOGY_ID);
+	/* data[4:0]
+	 * bit 0 == 0 host-gpu interface is PCIE
+	 * bit 0 == 1 host-gpu interface is Alternate Protocol
+	 * for AMD, this is XGMI
+	 */
+	data &= SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK;
+
+	return data ? true : false;
+}
+
+const struct amdgpu_smuio_funcs smuio_v13_0_funcs = {
+	.get_rom_index_offset = smuio_v13_0_get_rom_index_offset,
+	.get_rom_data_offset = smuio_v13_0_get_rom_data_offset,
+	.get_die_id = smuio_v13_0_get_die_id,
+	.get_socket_id = smuio_v13_0_get_socket_id,
+	.is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported,
+	.update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating,
+	.get_clock_gating_state = smuio_v13_0_get_clock_gating_state,
+}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h new file mode 100644 index 000000000000..a3bfe3e4fb46 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h @@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V13_0_H__
+#define __SMUIO_V13_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v13_0_funcs;
+
+#endif /* __SMUIO_V13_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c new file mode 100644 index 000000000000..8417890af227 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c @@ -0,0 +1,77 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v9_0.h"
+#include "smuio/smuio_9_0_offset.h"
+#include "smuio/smuio_9_0_sh_mask.h"
+
+static u32 smuio_v9_0_get_rom_index_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+}
+
+static u32 smuio_v9_0_get_rom_data_offset(struct amdgpu_device *adev)
+{
+	return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+}
+
+static void smuio_v9_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+	u32 def, data;
+
+	/* enable/disable ROM CG is not supported on APU */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+	else
+		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+	if (def != data)
+		WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
+}
+
+static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+	u32 data;
+
+	/* CGTT_ROM_CLK_CTRL0 is not available for APUs */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
+	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+const struct amdgpu_smuio_funcs smuio_v9_0_funcs = {
+	.get_rom_index_offset = smuio_v9_0_get_rom_index_offset,
+	.get_rom_data_offset = smuio_v9_0_get_rom_data_offset,
+	.update_rom_clock_gating = smuio_v9_0_update_rom_clock_gating,
+	.get_clock_gating_state = smuio_v9_0_get_clock_gating_state,
+}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.h b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.h new file mode 100644 index 000000000000..fc265ce9837d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.h @@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#ifndef __SMUIO_V9_0_H__ +#define __SMUIO_V9_0_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v9_0_funcs; + +#endif /* __SMUIO_V9_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8e1640bc07af..0c316a2d42ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -25,6 +25,8 @@ #include <linux/module.h> #include <linux/pci.h> +#include <drm/amdgpu_drm.h> + #include "amdgpu.h" #include "amdgpu_atombios.h" #include "amdgpu_ih.h" @@ -40,10 +42,6 @@ #include "gc/gc_9_0_sh_mask.h" #include "sdma0/sdma0_4_0_offset.h" #include "sdma1/sdma1_4_0_offset.h" -#include "hdp/hdp_4_0_offset.h" -#include "hdp/hdp_4_0_sh_mask.h" -#include "smuio/smuio_9_0_offset.h" -#include "smuio/smuio_9_0_sh_mask.h" #include "nbio/nbio_7_0_default.h" #include "nbio/nbio_7_0_offset.h" #include "nbio/nbio_7_0_sh_mask.h" @@ -61,16 +59,23 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" +#include "hdp_v4_0.h" #include "vega10_ih.h" +#include "vega20_ih.h" +#include "navi10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" #include "vce_v4_0.h" #include "vcn_v1_0.h" #include "vcn_v2_0.h" +#include "jpeg_v2_0.h" #include "vcn_v2_5.h" -#include "dce_virtual.h" +#include "jpeg_v2_5.h" +#include "smuio_v9_0.h" +#include "smuio_v11_0.h" +#include "smuio_v13_0.h" +#include "amdgpu_vkms.h" #include "mxgpu_ai.h" -#include "amdgpu_smu.h" #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" #include <uapi/linux/kfd_ioctl.h> @@ -80,87 +85,151 @@ #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 -/* for Vega20 register name change */ -#define mmHDP_MEM_POWER_CTRL 0x00d4 -#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L -#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L -#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L -#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L -#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 +static const struct amd_ip_funcs soc15_common_ip_funcs; + +/* Vega, Raven, Arcturus */ +static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, +}; + +static const struct amdgpu_video_codecs vega_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(vega_video_codecs_encode_array), + .codec_array = vega_video_codecs_encode_array, +}; + +/* Vega */ +static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, +}; + +static const struct amdgpu_video_codecs vega_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(vega_video_codecs_decode_array), + .codec_array = vega_video_codecs_decode_array, +}; + +/* Raven */ +static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 
4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, +}; + +static const struct amdgpu_video_codecs rv_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(rv_video_codecs_decode_array), + .codec_array = rv_video_codecs_decode_array, +}; + +/* Renoir, Arcturus */ +static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = +{ + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs rn_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(rn_video_codecs_decode_array), + .codec_array = rn_video_codecs_decode_array, +}; + +static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, + const struct amdgpu_video_codecs **codecs) +{ + if (adev->ip_versions[VCE_HWIP][0]) { + switch (adev->ip_versions[VCE_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 1, 0): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = &vega_video_codecs_decode; + return 0; + default: + return -EINVAL; + } + } else { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = &rv_video_codecs_decode; + return 0; + case IP_VERSION(2, 5, 0): + case IP_VERSION(2, 6, 0): + case IP_VERSION(2, 2, 0): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = &rn_video_codecs_decode; + return 0; + default: + return -EINVAL; + } + } +} + /* * Indirect registers accessor */ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) { - unsigned long flags, address, data; - u32 r; + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = adev->nbio.funcs->get_pcie_data_offset(adev); - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg); - (void)RREG32(address); - r = RREG32(data); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); - return r; + return amdgpu_device_indirect_rreg(adev, address, data, reg); } static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { - unsigned long flags, address, data; + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = adev->nbio.funcs->get_pcie_data_offset(adev); - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg); - (void)RREG32(address); - WREG32(data, v); - (void)RREG32(data); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + amdgpu_device_indirect_wreg(adev, address, data, reg, v); } static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) { - unsigned long flags, address, data; - u64 r; + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = 
adev->nbio.funcs->get_pcie_data_offset(adev); - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - /* read low 32 bit */ - WREG32(address, reg); - (void)RREG32(address); - r = RREG32(data); - - /* read high 32 bit*/ - WREG32(address, reg + 4); - (void)RREG32(address); - r |= ((u64)RREG32(data) << 32); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); - return r; + return amdgpu_device_indirect_rreg64(adev, address, data, reg); } static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { - unsigned long flags, address, data; + unsigned long address, data; address = adev->nbio.funcs->get_pcie_index_offset(adev); data = adev->nbio.funcs->get_pcie_data_offset(adev); - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - /* write low 32 bit */ - WREG32(address, reg); - (void)RREG32(address); - WREG32(data, (u32)(v & 0xffffffffULL)); - (void)RREG32(data); - - /* write high 32 bit */ - WREG32(address, reg + 4); - (void)RREG32(address); - WREG32(data, (u32)(v >> 32)); - (void)RREG32(data); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + amdgpu_device_indirect_wreg64(adev, address, data, reg, v); } static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) @@ -270,7 +339,16 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev) static u32 soc15_get_xclk(struct amdgpu_device *adev) { - return adev->clock.spll.reference_freq; + u32 reference_clock = adev->clock.spll.reference_freq; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) + return 10000; + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) + return reference_clock / 4; + + return reference_clock; } @@ -302,6 +380,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + uint32_t rom_index_offset; + uint32_t rom_data_offset; if (bios == NULL) return false; @@ -314,11 +394,16 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + rom_index_offset = + adev->smuio.funcs->get_rom_index_offset(adev); + rom_data_offset = + adev->smuio.funcs->get_rom_data_offset(adev); + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } @@ -387,7 +472,8 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { en = &soc15_allowed_read_registers[i]; - if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + if (adev->reg_offset[en->hwip][en->inst] && + reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; @@ -426,7 +512,9 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, if (entry->and_mask == 0xffffffff) { tmp = entry->or_mask; } else { - tmp = RREG32(reg); + tmp = (entry->hwip == GC_HWIP) ? 
+				RREG32_SOC15_IP(GC, reg) : RREG32(reg);
+
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		} @@ -437,143 +525,88 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
-			WREG32(reg, tmp);
-
-	}
-
-}
-
-static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
-{
-	u32 i;
-	int ret = 0;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	dev_info(adev->dev, "GPU mode1 reset\n");
-
-	/* disable BM */
-	pci_clear_master(adev->pdev);
-
-	pci_save_state(adev->pdev);
-
-	ret = psp_gpu_reset(adev);
-	if (ret)
-		dev_err(adev->dev, "GPU mode1 reset failed\n");
+			(entry->hwip == GC_HWIP) ?
+			WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
-	pci_restore_state(adev->pdev);
-
-	/* wait for asic to come out of reset */
-	for (i = 0; i < adev->usec_timeout; i++) {
-		u32 memsize = adev->nbio.funcs->get_memsize(adev);
-
-		if (memsize != 0xffffffff)
-			break;
-		udelay(1);
	}
-	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
-	return ret;
-}
-
-static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
-{
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-
-		*cap = smu_baco_is_support(smu);
-		return 0;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
-			*cap = false;
-			return -ENOENT;
-		}
-
-		return pp_funcs->get_asic_baco_capability(pp_handle, cap);
-	} } static int soc15_asic_baco_reset(struct amdgpu_device *adev) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+	int ret = 0;
	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
-	if (ras && ras->supported)
+	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
-	dev_info(adev->dev, "GPU BACO reset\n");
-
-	if (is_support_sw_smu(adev)) {
-		struct smu_context *smu = &adev->smu;
-
-		if (smu_baco_reset(smu))
-			return -EIO;
-	} else {
-		void *pp_handle = adev->powerplay.pp_handle;
-		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-		if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
-			return -ENOENT;
-
-		/* enter BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
-			return -EIO;
-
-		/* exit BACO state */
-		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
-			return -EIO;
-	}
+	ret = amdgpu_dpm_baco_reset(adev);
+	if (ret)
+		return ret;
	/* re-enable doorbell interrupt after BACO exit */
-	if (ras && ras->supported)
+	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
	return 0; }
-static int soc15_mode2_reset(struct amdgpu_device *adev)
-{
-	if (is_support_sw_smu(adev))
-		return smu_mode2_reset(&adev->smu);
-	if (!adev->powerplay.pp_funcs ||
-	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
-		return -ENOENT;
-
-	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
-}
- static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) {
-	bool baco_reset;
+	bool baco_reset = false;
+	bool connected_to_cpu = false;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-	switch (adev->asic_type) {
-	case CHIP_RAVEN:
-	case CHIP_RENOIR:
+	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
+		connected_to_cpu = true;
+
+	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
+	    amdgpu_reset_method ==
AMD_RESET_METHOD_BACO ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
+		/* If connected to cpu, driver only supports mode2 */
+		if (connected_to_cpu)
+			return AMD_RESET_METHOD_MODE2;
+		return amdgpu_reset_method;
+	}
+
+	if (amdgpu_reset_method != -1)
+		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
+			 amdgpu_reset_method);
+
+	switch (adev->ip_versions[MP1_HWIP][0]) {
+	case IP_VERSION(10, 0, 0):
+	case IP_VERSION(10, 0, 1):
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
		return AMD_RESET_METHOD_MODE2;
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-		soc15_asic_get_baco_capability(adev, &baco_reset);
-		break;
-	case CHIP_VEGA20:
-		if (adev->psp.sos_fw_version >= 0x80067)
-			soc15_asic_get_baco_capability(adev, &baco_reset);
-		else
-			baco_reset = false;
-		if (baco_reset) {
-			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
-			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
-			if (hive || (ras && ras->supported))
+	case IP_VERSION(9, 0, 0):
+	case IP_VERSION(11, 0, 2):
+		if (adev->asic_type == CHIP_VEGA20) {
+			if (adev->psp.sos.fw_version >= 0x80067)
+				baco_reset = amdgpu_dpm_is_baco_supported(adev);
+			/*
+			 * 1. PMFW version > 0x284300: all cases use baco
+			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+			 */
+			if (ras && adev->ras_enabled &&
+			    adev->pm.fw_version <= 0x283400)
				baco_reset = false;
+		} else {
+			baco_reset = amdgpu_dpm_is_baco_supported(adev);
		}
		break;
+	case IP_VERSION(13, 0, 2):
+		/*
+		 * 1. connected to cpu: driver issues mode2 reset
+		 * 2. discrete gpu: driver issues mode1 reset
+		 */
+		if (connected_to_cpu)
+			return AMD_RESET_METHOD_MODE2;
+		break;
	default:
-		baco_reset = false;
		break;
	} @@ -585,17 +618,42 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) {
+	/* original raven doesn't have full asic reset */
+	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
+		return 0;
+
	switch (soc15_asic_reset_method(adev)) {
-	case AMD_RESET_METHOD_BACO:
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
-		return soc15_asic_baco_reset(adev);
-	case AMD_RESET_METHOD_MODE2:
-		return soc15_mode2_reset(adev);
-	default:
-		if (!adev->in_suspend)
-			amdgpu_inc_vram_lost(adev);
-		return soc15_asic_mode1_reset(adev);
+	case AMD_RESET_METHOD_PCI:
+		dev_info(adev->dev, "PCI reset\n");
+		return amdgpu_device_pci_reset(adev);
+	case AMD_RESET_METHOD_BACO:
+		dev_info(adev->dev, "BACO reset\n");
+		return soc15_asic_baco_reset(adev);
+	case AMD_RESET_METHOD_MODE2:
+		dev_info(adev->dev, "MODE2 reset\n");
+		return amdgpu_dpm_mode2_reset(adev);
+	default:
+		dev_info(adev->dev, "MODE1 reset\n");
+		return amdgpu_device_mode1_reset(adev);
+	}
+}
+
+static bool soc15_supports_baco(struct amdgpu_device *adev)
+{
+	switch (adev->ip_versions[MP1_HWIP][0]) {
+	case IP_VERSION(9, 0, 0):
+	case IP_VERSION(11, 0, 2):
+		if (adev->asic_type == CHIP_VEGA20) {
+			if (adev->psp.sos.fw_version >= 0x80067)
+				return amdgpu_dpm_is_baco_supported(adev);
+			return false;
+		} else {
+			return amdgpu_dpm_is_baco_supported(adev);
+		}
+		break;
+	default:
+		return false;
	} } @@ -645,11 +703,12 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) static void soc15_program_aspm(struct amdgpu_device *adev) {
-
-	if (amdgpu_aspm == 0)
+	if (!amdgpu_aspm)
		return;
-	/* todo */
+	if (!(adev->flags & AMD_IS_APU) &&
+	    (adev->nbio.funcs->program_aspm))
+		adev->nbio.funcs->program_aspm(adev);
} static void soc15_enable_doorbell_aperture(struct amdgpu_device
*adev, @@ -659,7 +718,7 @@ static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); }
-static const struct amdgpu_ip_block_version vega10_common_ip_block =
+const struct amdgpu_ip_block_version vega10_common_ip_block =
{ .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 2, @@ -673,14 +732,27 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) return adev->nbio.funcs->get_rev_id(adev); }
-int soc15_set_ip_blocks(struct amdgpu_device *adev)
+static void soc15_reg_base_init(struct amdgpu_device *adev)
{
+	int r;
+
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
+		vega10_reg_base_init(adev);
+		break;
	case CHIP_RENOIR:
+		/* It's safe to do ip discovery here for Renoir,
+		 * as it doesn't support SRIOV. */
+		if (amdgpu_discovery) {
+			r = amdgpu_discovery_reg_base_init(adev);
+			if (r == 0)
+				break;
+			DRM_WARN("failed to init reg base from ip discovery table, "
+				 "fallback to legacy init method\n");
+		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20: @@ -689,159 +761,23 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_ARCTURUS: arct_reg_base_init(adev); break;
-	default:
-		return -EINVAL;
-	}
-
-	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
-		adev->gmc.xgmi.supported = true;
-
-	if (adev->flags & AMD_IS_APU) {
-		adev->nbio.funcs = &nbio_v7_0_funcs;
-		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
-	} else if (adev->asic_type == CHIP_VEGA20 ||
-		   adev->asic_type == CHIP_ARCTURUS) {
-		adev->nbio.funcs = &nbio_v7_4_funcs;
-		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
-	} else {
-		adev->nbio.funcs = &nbio_v6_1_funcs;
-		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
-	}
-
-	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
-		adev->df_funcs = &df_v3_6_funcs;
-	else
-		adev->df_funcs = &df_v1_7_funcs;
-
-	adev->rev_id = soc15_get_rev_id(adev);
-	adev->nbio.funcs->detect_hw_virt(adev);
-
-	if (amdgpu_sriov_vf(adev))
-		adev->virt.ops = &xgpu_ai_virt_ops;
-
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
-		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
-		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
-		if (amdgpu_sriov_vf(adev)) {
-			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
-				if (adev->asic_type == CHIP_VEGA20)
-					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-				else
-					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
-			}
-			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-		} else {
-			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
-			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
-				if (adev->asic_type == CHIP_VEGA20)
-					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
-				else
-					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
-			}
-		}
-		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-		if (!amdgpu_sriov_vf(adev)) {
-			if (is_support_sw_smu(adev))
-				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-			else
-				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		}
-		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
-		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_device_ip_block_add(adev,
&dm_ip_block); -#endif - if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); - } - break; - case CHIP_RAVEN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); - break; - case CHIP_ARCTURUS: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } - - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - - if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - break; - case CHIP_RENOIR: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); - if (is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + case CHIP_ALDEBARAN: + aldebaran_reg_base_init(adev); break; default: - return -EINVAL; + DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type); + break; } - - return 0; } -static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) +void soc15_set_virt_ops(struct amdgpu_device *adev) { - adev->nbio.funcs->hdp_flush(adev, ring); -} + adev->virt.ops = &xgpu_ai_virt_ops; -static void soc15_invalidate_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); - else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); + /* init soc15 reg 
base early enough so we can
+	 * request full access for sriov before
+	 * set_ip_blocks. */
+	soc15_reg_base_init(adev);
} static bool soc15_need_full_reset(struct amdgpu_device *adev) @@ -849,6 +785,7 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev) /* change this when we implement soft reset */ return true; }
+
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1) { @@ -980,6 +917,11 @@ static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev) return (nak_r + nak_g); }
+static void soc15_pre_asic_init(struct amdgpu_device *adev)
+{
+	gmc_v9_0_restore_registers(adev);
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, @@ -992,13 +934,14 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, .get_config_memsize = &soc15_get_config_memsize,
-	.flush_hdp = &soc15_flush_hdp,
-	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega10_doorbell_index_init, .get_pcie_usage = &soc15_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count,
+	.supports_baco = &soc15_supports_baco,
+	.pre_asic_init = &soc15_pre_asic_init,
+	.query_video_codecs = &soc15_query_video_codecs,
}; static const struct amdgpu_asic_funcs vega20_asic_funcs = @@ -1007,19 +950,20 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .read_bios_from_rom = &soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset,
+	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, .set_vce_clocks = &soc15_set_vce_clocks, .get_config_memsize = &soc15_get_config_memsize,
-	.flush_hdp = &soc15_flush_hdp,
-	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, .get_pcie_usage = &vega20_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count,
-	.reset_method = &soc15_asic_reset_method
+	.supports_baco = &soc15_supports_baco,
+	.pre_asic_init = &soc15_pre_asic_init,
+	.query_video_codecs = &soc15_query_video_codecs,
}; static int soc15_common_early_init(void *handle) @@ -1044,10 +988,13 @@ static int soc15_common_early_init(void *handle) adev->se_cac_rreg = &soc15_se_cac_rreg; adev->se_cac_wreg = &soc15_se_cac_wreg;
-
+	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
+	/* TODO: split the GC and PG flags based on the IP version for which
+	 * they are relevant.
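+	 * (A hypothetical illustration, not part of this patch: VCN/JPEG PG
+	 * flags would key off adev->ip_versions[UVD_HWIP][0] rather than the
+	 * GC version switched on below.)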
+ */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1071,7 +1018,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = 0x1; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1094,7 +1041,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1117,18 +1064,23 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x28; break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): adev->asic_funcs = &soc15_asic_funcs; + if (adev->rev_id >= 0x8) + adev->apu_flags |= AMD_APU_IS_RAVEN2; + + if (adev->apu_flags & AMD_APU_IS_RAVEN2) adev->external_rev_id = adev->rev_id + 0x79; - else if (adev->pdev->device == 0x15d8) + else if (adev->apu_flags & AMD_APU_IS_PICASSO) adev->external_rev_id = adev->rev_id + 0x41; else if (adev->rev_id == 1) adev->external_rev_id = adev->rev_id + 0x20; else adev->external_rev_id = adev->rev_id + 0x01; - if (adev->rev_id >= 0x8) { + if (adev->apu_flags & AMD_APU_IS_RAVEN2) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | @@ -1138,42 +1090,36 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; - } else if (adev->pdev->device == 0x15d8) { + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + } else if (adev->apu_flags & AMD_APU_IS_PICASSO) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | - AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; + AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_CP_LS | - AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -1183,19 +1129,16 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_DRM_MGCG | AMD_CG_SUPPORT_DRM_LS | - AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_VCN | - AMD_PG_SUPPORT_VCN_DPG; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = 
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1208,12 +1151,19 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | - AMD_CG_SUPPORT_IH_CG; - adev->pg_flags = 0; + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x32; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): adev->asic_funcs = &soc15_asic_funcs; + + if (adev->apu_flags & AMD_APU_IS_RENOIR) + adev->external_rev_id = adev->rev_id + 0x91; + else + adev->external_rev_id = adev->rev_id + 0xa1; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_3D_CGCG | @@ -1227,16 +1177,29 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_DF_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; - adev->external_rev_id = adev->rev_id + 0x91; + break; + case IP_VERSION(9, 4, 2): + adev->asic_funcs = &vega20_asic_funcs; + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG; + adev->external_rev_id = adev->rev_id + 0x3c; break; default: /* FIXME: not supported yet */ @@ -1259,8 +1222,9 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - if (adev->nbio.funcs->ras_late_init) - r = adev->nbio.funcs->ras_late_init(adev); + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->ras_late_init) + r = adev->nbio.ras_funcs->ras_late_init(adev); return r; } @@ -1272,7 +1236,7 @@ static int soc15_common_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); - adev->df_funcs->sw_init(adev); + adev->df.funcs->sw_init(adev); return 0; } @@ -1281,8 +1245,10 @@ static int soc15_common_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_nbio_ras_fini(adev); - adev->df_funcs->sw_fini(adev); + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->ras_fini) + adev->nbio.ras_funcs->ras_fini(adev); + adev->df.funcs->sw_fini(adev); return 0; } @@ -1345,9 +1311,11 @@ static int soc15_common_hw_fini(void *handle) if (adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { - if (adev->nbio.funcs->init_ras_controller_interrupt) + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->init_ras_controller_interrupt) amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); - if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); } @@ -1383,40 +1351,6 @@ static int soc15_common_soft_reset(void *handle) return 0; } -static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable) -{ - uint32_t def, data; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) { - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); - - if (enable && 
(adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) - data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; - else - data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | - HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); - - if (def != data) - WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); - } else { - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) - data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; - else - data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; - - if (def != data) - WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); - } -} - static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) { uint32_t def, data; @@ -1461,24 +1395,6 @@ static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data); } -static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t def, data; - - def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) - data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | - CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); - else - data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | - CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; - - if (def != data) - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data); -} - static int soc15_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1487,43 +1403,43 @@ static int soc15_common_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 2, 0): + case IP_VERSION(7, 4, 0): adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); - soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); + adev->hdp.funcs->update_clock_gating(adev, + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); - soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); - adev->df_funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); + adev->smuio.funcs->update_rom_clock_gating(adev, + state == AMD_CG_STATE_GATE); + adev->df.funcs->update_medium_grain_clock_gating(adev, + state == AMD_CG_STATE_GATE); break; - case CHIP_RAVEN: - case CHIP_RENOIR: + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + case IP_VERSION(2, 5, 0): adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); - soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); + adev->hdp.funcs->update_clock_gating(adev, + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); - soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; - case CHIP_ARCTURUS: - soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + case IP_VERSION(7, 4, 1): + case IP_VERSION(7, 4, 4): + adev->hdp.funcs->update_clock_gating(adev, + state == AMD_CG_STATE_GATE); break; default: break; @@ -1541,27 +1457,25 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) adev->nbio.funcs->get_clockgating_state(adev, flags); - /* AMD_CG_SUPPORT_HDP_LS */ - data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); - if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) - *flags |= AMD_CG_SUPPORT_HDP_LS; + adev->hdp.funcs->get_clock_gating_state(adev, flags); + + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) { - /* AMD_CG_SUPPORT_DRM_MGCG */ - data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); - if (!(data & 0x01000000)) - *flags |= AMD_CG_SUPPORT_DRM_MGCG; + /* AMD_CG_SUPPORT_DRM_MGCG */ + data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); + if (!(data & 0x01000000)) + *flags |= AMD_CG_SUPPORT_DRM_MGCG; - /* AMD_CG_SUPPORT_DRM_LS */ - data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); - if (data & 0x1) - *flags |= AMD_CG_SUPPORT_DRM_LS; + /* AMD_CG_SUPPORT_DRM_LS */ + data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); + if (data & 0x1) + *flags |= AMD_CG_SUPPORT_DRM_LS; + } /* AMD_CG_SUPPORT_ROM_MGCG */ - data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0)); - if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) - *flags |= AMD_CG_SUPPORT_ROM_MGCG; + adev->smuio.funcs->get_clock_gating_state(adev, flags); - adev->df_funcs->get_clockgating_state(adev, flags); + adev->df.funcs->get_clockgating_state(adev, flags); } static int soc15_common_set_powergating_state(void *handle, @@ -1571,7 +1485,7 @@ static int soc15_common_set_powergating_state(void *handle, return 0; } -const struct amd_ip_funcs soc15_common_ip_funcs = { +static const struct amd_ip_funcs soc15_common_ip_funcs = { .name = "soc15_common", .early_init = soc15_common_early_init, .late_init = soc15_common_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 57af489a5de3..efc2a253e8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,11 +28,11 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" +extern const struct amdgpu_ip_block_version vega10_common_ip_block; + #define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 #define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 -extern const struct amd_ip_funcs soc15_common_ip_funcs; - struct soc15_reg_golden { u32 hwip; u32 instance; @@ -42,6 +42,20 @@ struct soc15_reg_golden { u32 or_mask; }; +struct soc15_reg_rlcg { + u32 hwip; + u32 instance; + u32 segment; + u32 reg; +}; + +struct soc15_reg { + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; +}; + struct soc15_reg_entry { uint32_t hwip; uint32_t inst; @@ -60,6 +74,18 @@ struct soc15_allowed_register_entry { bool grbm_indexed; }; +struct soc15_ras_field_entry { 
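+	/* Describes one RAS-capable register: where it lives in the
+	 * reg_offset table and the mask/shift pairs used to pull the SEC
+	 * (correctable) and DED (uncorrectable) error counts out of its
+	 * value. */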
+ const char *name; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t sec_count_mask; + uint32_t sec_count_shift; + uint32_t ded_count_mask; + uint32_t ded_count_shift; +}; + #define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) @@ -69,9 +95,13 @@ struct soc15_allowed_register_entry { #define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT +#define SOC15_REG_FIELD_VAL(val, mask, shift) (((val) & mask) >> shift) + +#define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift) + void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); -int soc15_set_ip_blocks(struct amdgpu_device *adev); +void soc15_set_virt_ops(struct amdgpu_device *adev); void soc15_program_register_sequence(struct amdgpu_device *adev, const struct soc15_reg_golden *registers, @@ -80,6 +110,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); +int aldebaran_reg_base_init(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 839f186e1182..8a9ca87d8663 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -27,37 +27,65 @@ /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) +#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \ + adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \ + WREG32(reg, value)) + +#define __RREG32_SOC15_RLC__(reg, flag, hwip) \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? 
\ + adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \ + RREG32(reg)) + #define WREG32_FIELD15(ip, idx, reg, field, val) \ - WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \ - & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + (__RREG32_SOC15_RLC__( \ + adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + 0, ip##_HWIP) & \ + ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ + 0, ip##_HWIP) #define RREG32_SOC15(ip, inst, reg) \ - RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + 0, ip##_HWIP) + +#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP) + +#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + AMDGPU_REGS_NO_KIQ, ip##_HWIP) #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \ - RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset) + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP) #define WREG32_SOC15(ip, inst, reg, value) \ - WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ + value, 0, ip##_HWIP) + +#define WREG32_SOC15_IP(ip, reg, value) \ + __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ - WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value) + __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ - WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \ + value, 0, ip##_HWIP) -#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ +#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ +({ int ret = 0; \ do { \ - uint32_t old_ = 0; \ + uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ + ret = 0; \ while ((tmp_ & (mask)) != (expected_value)) { \ if (old_ != tmp_) { \ loop = adev->usec_timeout; \ - old_ = tmp_; \ - } else \ - udelay(1); \ + old_ = tmp_; \ + } else \ + udelay(1); \ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ loop--; \ if (!loop) { \ @@ -67,17 +95,21 @@ break; \ } \ } \ - } while (0) + } while (0); \ + ret; \ +}) -#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a))) #define WREG32_RLC(reg, value) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP) + +#define WREG32_RLC_EX(prefix, reg, value) \ do { \ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ + if (amdgpu_sriov_fullaccess(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ - uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \ - uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1; \ - uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT; \ + uint32_t r0 = 
adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \ + uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \ + uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \ WREG32(r0, value); \ WREG32(r1, (reg | 0x80000000)); \ WREG32(spare_int, 0x1); \ @@ -94,14 +126,28 @@ } \ } while (0) +/* shadow the registers in the callback function */ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP) + +/* for GC only */ +#define RREG32_RLC(reg) \ + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) + +#define WREG32_RLC_NO_KIQ(reg, value, hwip) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + +#define RREG32_RLC_NO_KIQ(reg, hwip) \ + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) + +#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ - uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \ - uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \ - uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \ - uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; \ + if (amdgpu_sriov_fullaccess(adev)) { \ + uint32_t r2 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \ + uint32_t r3 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \ + uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \ + uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \ if (target_reg == grbm_cntl) \ WREG32(r2, value); \ else if (target_reg == grbm_idx) \ @@ -112,18 +158,32 @@ } \ } while (0) +#define RREG32_SOC15_RLC(ip, inst, reg) \ + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP) + #define WREG32_SOC15_RLC(ip, inst, reg, value) \ do { \ + uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\ + __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \ + } while (0) + +#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \ + do { \ uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\ - WREG32_RLC(target_reg, value); \ + WREG32_RLC_EX(prefix, target_reg, value); \ } while (0) #define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \ - WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ - (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \ - & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ + (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + AMDGPU_REGS_RLC, ip##_HWIP) & \ + ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ + AMDGPU_REGS_RLC, ip##_HWIP) #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \ - WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value) + 
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP) + +#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index edfe50821cd9..799925d22fc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -253,7 +253,30 @@ # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) -#define PACKET3_AQUIRE_MEM 0x58 +#define PACKET3_ACQUIRE_MEM 0x58 +/* 1. HEADER + * 2. COHER_CNTL [30:0] + * 2.1 ENGINE_SEL [31:31] + * 3. COHER_SIZE [31:0] + * 4. COHER_SIZE_HI [7:0] + * 5. COHER_BASE_LO [31:0] + * 6. COHER_BASE_HI [23:0] + * 7. POLL_INTERVAL [15:0] + */ +/* COHER_CNTL fields for CP_COHER_CNTL */ +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29) +#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30) #define PACKET3_REWIND 0x59 #define PACKET3_LOAD_UCONFIG_REG 0x5E #define PACKET3_LOAD_SH_REG 0x5F @@ -286,6 +309,7 @@ #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SWITCH_BUFFER 0x8B #define PACKET3_FRAME_CONTROL 0x90 +# define FRAME_TMZ (1 << 0) # define FRAME_CMD(x) ((x) << 28) /* * x=0: tmz_begin diff --git a/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h new file mode 100644 index 000000000000..f14833fae07c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h @@ -0,0 +1,84 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
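As an aside on the new PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_* helpers above, here is a minimal sketch (not part of this patch) of how they compose the COHER_CNTL dword of a full-range ACQUIRE_MEM emit; it is modelled on a GFX9-style cache flush/invalidate and assumes the driver's existing PACKET3() and amdgpu_ring_write() helpers:

static void example_emit_acquire_mem(struct amdgpu_ring *ring)
{
	/* write back and invalidate L2, invalidate TCL1 and the SH caches */
	const unsigned int cp_coher_cntl =
		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);

	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, cp_coher_cntl);	/* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE: full range */
	amdgpu_ring_write(ring, 0xffffff);	/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* POLL_INTERVAL */
}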
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _TA_RAP_IF_H +#define _TA_RAP_IF_H + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +enum ta_rap_status { + TA_RAP_STATUS__SUCCESS = 1, + TA_RAP_STATUS__ERROR_GENERIC_FAILURE = 2, + TA_RAP_STATUS__ERROR_CMD_NOT_SUPPORTED = 3, + TA_RAP_STATUS__ERROR_INVALID_VALIDATION_METHOD = 4, + TA_RAP_STATUS__ERROR_NULL_POINTER = 5, + TA_RAP_STATUS__ERROR_NOT_INITIALIZED = 6, + TA_RAP_STATUS__ERROR_VALIDATION_FAILED = 7, + TA_RAP_STATUS__ERROR_ASIC_NOT_SUPPORTED = 8, + TA_RAP_STATUS__ERROR_OPERATION_NOT_PERMISSABLE = 9, + TA_RAP_STATUS__ERROR_ALREADY_INIT = 10, +}; + +enum ta_rap_cmd { + TA_CMD_RAP__INITIALIZE = 1, + TA_CMD_RAP__VALIDATE_L0 = 2, +}; + +enum ta_rap_validation_method { + METHOD_A = 1, +}; + +struct ta_rap_cmd_input_data { + uint8_t reserved[8]; +}; + +struct ta_rap_cmd_output_data { + uint32_t last_subsection; + uint32_t num_total_validate; + uint32_t num_valid; + uint32_t last_validate_addr; + uint32_t last_validate_val; + uint32_t last_validate_val_exptd; +}; + +union ta_rap_cmd_input { + struct ta_rap_cmd_input_data input; +}; + +union ta_rap_cmd_output { + struct ta_rap_cmd_output_data output; +}; + +struct ta_rap_shared_memory { + uint32_t cmd_id; + uint32_t validation_method_id; + uint32_t resp_id; + enum ta_rap_status rap_status; + union ta_rap_cmd_input rap_in_message; + union ta_rap_cmd_output rap_out_message; + uint8_t reserved[64]; +}; + +#endif // #define _TA_RAP_IF_H diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index ca7d05993ca2..5093826a43d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -24,6 +24,8 @@ #ifndef _TA_RAS_IF_H #define _TA_RAS_IF_H +#define RAS_TA_HOST_IF_VER 0 + /* Responses have bit 31 set */ #define RSP_ID_MASK (1U << 31) #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) @@ -37,17 +39,32 @@ enum ras_command { }; enum ta_ras_status { - TA_RAS_STATUS__SUCCESS = 0x00, - TA_RAS_STATUS__RESET_NEEDED = 0x01, - TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02, - TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03, - TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04, - TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05, - TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0x06, - TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0x07, - TA_RAS_STATUS__ERROR_TIMEOUT = 0x08, - TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0x09, - TA_RAS_STATUS__ERROR_GENERIC = 0x10, + TA_RAS_STATUS__SUCCESS = 0x0000, + TA_RAS_STATUS__RESET_NEEDED = 0xA001, + TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002, + TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, + TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004, + TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0xA005, + TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0xA006, + TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007, + TA_RAS_STATUS__ERROR_TIMEOUT = 0xA008, + TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0XA009, + TA_RAS_STATUS__ERROR_GENERIC = 0xA00A, + TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B, + TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C, + TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, + TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, + TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 
0xA010,
+	TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS		= 0xA011,
+	TA_RAS_STATUS__ERROR_RAS_READ_WRITE		= 0xA012,
+	TA_RAS_STATUS__ERROR_NULL_PTR			= 0xA013,
+	TA_RAS_STATUS__ERROR_UNSUPPORTED_IP		= 0xA014,
+	TA_RAS_STATUS__ERROR_PCS_STATE_QUIET		= 0xA015,
+	TA_RAS_STATUS__ERROR_PCS_STATE_ERROR		= 0xA016,
+	TA_RAS_STATUS__ERROR_PCS_STATE_HANG		= 0xA017,
+	TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN		= 0xA018,
+	TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ	= 0xA019
 };
 
 enum ta_ras_block {
@@ -65,9 +82,18 @@ enum ta_ras_block {
 	TA_RAS_BLOCK__MP0,
 	TA_RAS_BLOCK__MP1,
 	TA_RAS_BLOCK__FUSE,
+	TA_RAS_BLOCK__MCA,
 	TA_NUM_BLOCK_MAX
 };
 
+enum ta_ras_mca_block {
+	TA_RAS_MCA_BLOCK__MP0 = 0,
+	TA_RAS_MCA_BLOCK__MP1 = 1,
+	TA_RAS_MCA_BLOCK__MPIO = 2,
+	TA_RAS_MCA_BLOCK__IOHC = 3,
+	TA_MCA_NUM_BLOCK_MAX
+};
+
 enum ta_ras_error_type {
 	TA_RAS_ERROR__NONE = 0,
 	TA_RAS_ERROR__PARITY = 1,
@@ -97,22 +123,43 @@ struct ta_ras_trigger_error_input {
 	uint64_t value;		// method of error injection, i.e. persistent, coherent, etc.
 };
 
+struct ta_ras_init_flags {
+	uint8_t poison_mode_en;
+	uint8_t dgpu_mode;
+};
+
+struct ta_ras_output_flags {
+	uint8_t ras_init_success_flag;
+	uint8_t err_inject_switch_disable_flag;
+	uint8_t reg_access_failure_flag;
+};
+
 /* Common input structure for RAS callbacks */
 /**********************************************************/
 union ta_ras_cmd_input {
+	struct ta_ras_init_flags init_flags;
 	struct ta_ras_enable_features_input enable_features;
 	struct ta_ras_disable_features_input disable_features;
 	struct ta_ras_trigger_error_input trigger_error;
+
+	uint32_t reserve_pad[256];
+};
+
+union ta_ras_cmd_output {
+	struct ta_ras_output_flags flags;
+
+	uint32_t reserve_pad[256];
 };
 
 /* Shared Memory structures */
 /**********************************************************/
 struct ta_ras_shared_memory {
-	uint32_t cmd_id;
-	uint32_t resp_id;
-	enum ta_ras_status ras_status;
-	uint32_t reserved;
-	union ta_ras_cmd_input ras_in_message;
+	uint32_t		cmd_id;
+	uint32_t		resp_id;
+	uint32_t		ras_status;
+	uint32_t		if_version;
+	union ta_ras_cmd_input	ras_in_message;
+	union ta_ras_cmd_output	ras_out_message;
 };
 
 #endif // _TA_RAS_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 000000000000..cf8ff064dc72
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
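A minimal sketch (not from this patch) of how a host-side caller might validate a TA reply against the extended ta_ras_shared_memory layout above; the PSP invocation itself is elided and the error codes are illustrative:

static int example_check_ras_resp(struct ta_ras_shared_memory *ras_cmd,
				  uint32_t cmd_id)
{
	/* responses echo the command id with bit 31 (RSP_ID_MASK) set */
	if (ras_cmd->resp_id != RSP_ID(cmd_id))
		return -EINVAL;

	if (ras_cmd->ras_status != TA_RAS_STATUS__SUCCESS)
		return -EIO;

	/* the new out-message flags report init/injection state */
	if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag)
		return -EACCES;

	return 0;
}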
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+	/* Query whether TA is responding, used only for validation purpose */
+	TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+	/* Send region of interest and CRC value to I2C */
+	TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+	/* Maximum Command ID */
+	TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status returned in the shared buffer status field
+ */
+enum ta_securedisplay_status {
+	TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00,		/* Success */
+	TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01,	/* Generic Failure */
+	TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02,	/* Invalid Parameter */
+	TA_SECUREDISPLAY_STATUS__NULL_POINTER = 0x03,		/* Null Pointer */
+	TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04,	/* Fail to Write to I2C */
+	TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05,	/* Fail to Read DIO Scratch Register */
+	TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06,		/* Fail to Read CRC */
+	TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR = 0x07,		/* Failed to initialize I2C */
+
+	TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,		/* Maximum Value for status */
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading the corresponding DIO Scratch register for ROI
+ */
+enum ta_securedisplay_max_phy {
+	TA_SECUREDISPLAY_PHY0 = 0,
+	TA_SECUREDISPLAY_PHY1 = 1,
+	TA_SECUREDISPLAY_PHY2 = 2,
+	TA_SECUREDISPLAY_PHY3 = 3,
+	TA_SECUREDISPLAY_MAX_PHY = 4,
+};
+
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined return value, 0xAB, used only to validate that
+ * communication to the Secure Display TA is functional.
+ * This value is used to validate whether TA is responding successfully + */ +enum ta_securedisplay_ta_query_cmd_ret { + /* This is a value to validate if TA is loaded successfully */ + TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB, +}; + +/** @enum ta_securedisplay_buffer_size + * I2C Buffer size which contains 8 bytes of ROI (X start, X end, Y start, Y end) + * and 6 bytes of CRC( R,G,B) and 1 byte for physical ID + */ +enum ta_securedisplay_buffer_size { + /* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */ + TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15, +}; + +/** Input/output structures for Secure Display commands */ +/**********************************************************/ +/** + * Input structures + */ + +/** @struct ta_securedisplay_send_roi_crc_input + * Physical ID to determine which DIO scratch register should be used to get ROI + */ +struct ta_securedisplay_send_roi_crc_input { + uint32_t phy_id; /* Physical ID */ +}; + +/** @union ta_securedisplay_cmd_input + * Input buffer + */ +union ta_securedisplay_cmd_input { + /* send ROI and CRC input buffer format */ + struct ta_securedisplay_send_roi_crc_input send_roi_crc; + uint32_t reserved[4]; +}; + +/** + * Output structures + */ + +/** @struct ta_securedisplay_query_ta_output + * Output buffer format for query TA whether TA is responding used only for validation purpose + */ +struct ta_securedisplay_query_ta_output { + /* return value from TA when it is queried for validation purpose only */ + uint32_t query_cmd_ret; +}; + +/** @struct ta_securedisplay_send_roi_crc_output + * Output buffer format for send ROI CRC command which will pass I2c buffer created inside TA + * and used to write to I2C used only for validation purpose + */ +struct ta_securedisplay_send_roi_crc_output { + uint8_t i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE]; /* I2C buffer */ + uint8_t reserved; +}; + +/** @union ta_securedisplay_cmd_output + * Output buffer + */ +union ta_securedisplay_cmd_output { + /* Query TA output buffer format used only for validation purpose*/ + struct ta_securedisplay_query_ta_output query_ta; + /* Send ROI CRC output buffer format used only for validation purpose */ + struct ta_securedisplay_send_roi_crc_output send_roi_crc; + uint32_t reserved[4]; +}; + +/** @struct securedisplay_cmd + * Secure Display Command which is shared buffer memory + */ +struct securedisplay_cmd { + uint32_t cmd_id; /* +0 Bytes Command ID */ + enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */ + uint32_t reserved[2]; /* +8 Bytes Reserved */ + union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */ + union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */ + /**@note Total 48 Bytes */ +}; + +#endif //_TA_SECUREDISPLAY_IF_H + diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h index ac2c27b7630c..da815a93d46e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h @@ -33,7 +33,8 @@ enum ta_command_xgmi { TA_COMMAND_XGMI__GET_NODE_ID = 0x01, TA_COMMAND_XGMI__GET_HIVE_ID = 0x02, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03, - TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04 + TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04, + TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B }; /* XGMI related enumerations */ @@ -75,6 +76,11 @@ struct ta_xgmi_node_info { enum ta_xgmi_assigned_sdma_engine sdma_engine; }; +struct ta_xgmi_peer_link_info { + uint64_t node_id; + uint8_t num_links; +}; + struct 
ta_xgmi_cmd_initialize_output { uint32_t status; }; @@ -97,6 +103,11 @@ struct ta_xgmi_cmd_get_topology_info_output { struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; }; +struct ta_xgmi_cmd_get_peer_link_info_output { + uint32_t num_nodes; + struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; +}; + struct ta_xgmi_cmd_set_topology_info_input { uint32_t num_nodes; struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; @@ -115,6 +126,7 @@ union ta_xgmi_cmd_output { struct ta_xgmi_cmd_get_node_id_output get_node_id; struct ta_xgmi_cmd_get_hive_id_output get_hive_id; struct ta_xgmi_cmd_get_topology_info_output get_topology_info; + struct ta_xgmi_cmd_get_peer_link_info_output get_link_info; }; /**********************************************************/ @@ -122,7 +134,8 @@ struct ta_xgmi_shared_memory { uint32_t cmd_id; uint32_t resp_id; enum ta_xgmi_status xgmi_status; - uint32_t reserved; + uint8_t flag_extend_link_record; + uint8_t reserved0[3]; union ta_xgmi_cmd_input xgmi_in_message; union ta_xgmi_cmd_output xgmi_out_message; }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index e40140bf6699..b08905d1c00f 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -181,6 +181,7 @@ static void tonga_ih_irq_disable(struct amdgpu_device *adev) * tonga_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (VI). Also check for @@ -195,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, wptr = le32_to_cpu(*ih->wptr_cpu); - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 16). Hopefully - * this should allow us to catchup. - */ - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); - ih->rptr = (wptr + 16) & ih->ptr_mask; - tmp = RREG32(mmIH_RB_CNTL); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32(mmIH_RB_CNTL, tmp); - } + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32(mmIH_RB_WPTR); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 16). Hopefully + * this should allow us to catchup. + */ + + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; + tmp = RREG32(mmIH_RB_CNTL); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32(mmIH_RB_CNTL, tmp); + +out: return (wptr & ih->ptr_mask); } @@ -215,6 +227,8 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, * tonga_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to decode + * @entry: IV entry to place decoded information into * * Decodes the interrupt vector at the current rptr * position and also advance the position. 
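On the reworked overflow path above: the second read guards against acting on a stale writeback copy of IH_RB_WPTR whose overflow bit has already been cleared in the register. For context, a simplified sketch (not part of this patch, modelled loosely on the driver's IH processing loop, dispatch elided) of how these three callbacks are typically driven:

static void example_drain_ih_ring(struct amdgpu_device *adev,
				  struct amdgpu_ih_ring *ih)
{
	struct amdgpu_iv_entry entry;
	u32 wptr = adev->irq.ih_funcs->get_wptr(adev, ih);

	/* rptr chases wptr; decode_iv advances ih->rptr past each vector */
	while (ih->rptr != wptr) {
		adev->irq.ih_funcs->decode_iv(adev, ih, &entry);
		/* ... dispatch entry to the registered interrupt sources ... */
	}
	adev->irq.ih_funcs->set_rptr(adev, ih);
}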
@@ -247,6 +261,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, * tonga_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr * * Set the IH ring buffer rptr. */ @@ -297,8 +312,7 @@ static int tonga_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 47c4b96b14d1..20b44983ac94 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -22,24 +22,16 @@ */ #include "umc_v6_1.h" #include "amdgpu_ras.h" +#include "amdgpu_umc.h" #include "amdgpu.h" #include "rsmu/rsmu_0_0_2_offset.h" #include "rsmu/rsmu_0_0_2_sh_mask.h" #include "umc/umc_6_1_1_offset.h" #include "umc/umc_6_1_1_sh_mask.h" +#include "umc/umc_6_1_2_offset.h" -#define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 - -/* - * (addr / 256) * 8192, the higher 26 bits in ErrorAddr - * is the index of 8KB block - */ -#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) -/* channel index is the index of 256B block */ -#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) -/* offset in 256B block */ -#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) +#define UMC_6_INST_DIST 0x40000 const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { @@ -49,41 +41,129 @@ const uint32_t {9, 25, 0, 16}, {15, 31, 6, 22} }; -static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev, - uint32_t umc_instance) +static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev) { - uint32_t rsmu_umc_index; + uint32_t rsmu_umc_addr, rsmu_umc_val; - rsmu_umc_index = RREG32_SOC15(RSMU, 0, + rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, + rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4); + + rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, RSMU_UMC_INDEX_MODE_EN, 1); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_INSTANCE, umc_instance); - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_WREN, 1 << umc_instance); - WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - rsmu_umc_index); + + WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val); } static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) { - WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + uint32_t rsmu_umc_addr, rsmu_umc_val; + + rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4); + + rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, RSMU_UMC_INDEX_MODE_EN, 0); + + WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val); } -static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) +static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev) { - uint32_t rsmu_umc_index; + uint32_t rsmu_umc_addr, rsmu_umc_val; + + rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4); - rsmu_umc_index = RREG32_SOC15(RSMU, 0, - mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); - return REG_GET_FIELD(rsmu_umc_index, - 
RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, - RSMU_UMC_INDEX_INSTANCE); + return REG_GET_FIELD(rsmu_umc_val, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_MODE_EN); +} + +static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst; +} + +static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_err_cnt_addr; + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, + mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, + mmUMCCH0_0_EccErrCnt_ARCT); + } else { + /* UMC 6_1_1 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, + mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, + mmUMCCH0_0_EccErrCnt); + } + + /* select the lower chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 0); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear lower chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V6_1_CE_CNT_INIT); + + /* select the higher chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear higher chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V6_1_CE_CNT_INIT); +} + +static void umc_v6_1_clear_error_count(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + uint32_t rsmu_umc_index_state = + umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_clear_error_count_per_channel(adev, + umc_reg_offset); + } + + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, @@ -95,39 +175,48 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* select the lower chip and check the error count */ - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + 
ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 0); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); *error_count += (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - UMC_V6_1_CE_CNT_INIT); - /* clear the lower chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); /* select the higher chip and check the err counter */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); *error_count += (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - UMC_V6_1_CE_CNT_INIT); - /* clear the higher chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); /* check for SRAM correctable error MCUMC_STATUS is a 64 bit register */ - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) @@ -141,11 +230,18 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* check the MCUMC_STATUS */ - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -155,49 +251,91 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev *error_count += 1; } -static void umc_v6_1_query_error_count(struct amdgpu_device *adev, - struct ras_err_data *err_data, uint32_t umc_reg_offset, - uint32_t channel_index) -{ - umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, - &(err_data->ce_count)); - umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, - &(err_data->ue_count)); -} - static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - amdgpu_umc_for_each_channel(umc_v6_1_query_error_count); + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + 
umc_v6_1_disable_umc_index_mode(adev); + + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Fail to disable DF-Cstate.\n"); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_query_correctable_error_count(adev, + umc_reg_offset, + &(err_data->ce_count)); + umc_v6_1_querry_uncorrectable_error_count(adev, + umc_reg_offset, + &(err_data->ue_count)); + } + + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Fail to enable DF-Cstate\n"); + + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); + + umc_v6_1_clear_error_count(adev); } static void umc_v6_1_query_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t channel_index) + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) { uint32_t lsb, mc_umc_status_addr; - uint64_t mc_umc_status, err_addr, retired_page; + uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; struct eeprom_table_record *err_rec; + uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; + + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0_ARCT); + } else { + /* UMC 6_1_1 registers */ + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); + } - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return; - /* skip error address process if -ENOMEM */ if (!err_data->err_addr) { /* clear umc status */ - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); return; } err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { - err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4); + err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4); /* the lowest lsb bits should be ignored */ lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB); err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); @@ -218,64 +356,119 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; err_rec->cu = 0; err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); + err_rec->mcumc_id = umc_inst; err_data->err_addr_cnt++; } } /* clear umc status */ - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); } static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t 
umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Fail to disable DF-Cstate.\n"); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } + + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Fail to enable DF-Cstate\n"); + + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, - struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t channel_index) + uint32_t umc_reg_offset) { uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; uint32_t ecc_err_cnt_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + } else { + /* UMC 6_1_1 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + } /* select the lower chip and check the error count */ - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 0); /* set ce error interrupt type to APIC based interrupt */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrInt, 0x1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); /* set error count to initial value */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); /* select the higher chip and check the err counter */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, EccErrCntCsSel, 1); - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); } static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) { - void *ras_error_status = NULL; + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + uint32_t rsmu_umc_index_state = umc_v6_1_get_umc_index_mode_state(adev); + + if (rsmu_umc_index_state) + umc_v6_1_disable_umc_index_mode(adev); + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_6_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset); + } - amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); + if (rsmu_umc_index_state) + umc_v6_1_enable_umc_index_mode(adev); } -const struct amdgpu_umc_funcs umc_v6_1_funcs = { +const struct amdgpu_umc_ras_funcs 
umc_v6_1_ras_funcs = {
	.err_cnt_init = umc_v6_1_err_cnt_init,
	.ras_late_init = amdgpu_umc_ras_late_init,
+	.ras_fini = amdgpu_umc_ras_fini,
	.query_ras_error_count = umc_v6_1_query_ras_error_count,
	.query_ras_error_address = umc_v6_1_query_ras_error_address,
-	.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
-	.disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index dab9cbd292c5..5dc36c730bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -35,7 +35,8 @@
 /* total channel instances in one umc block */
 #define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM)
 /* UMC register per channel offset */
-#define UMC_V6_1_PER_CHANNEL_OFFSET		0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20	0x800
+#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT	0x400
 
 /* EccErrCnt max value */
 #define UMC_V6_1_CE_CNT_MAX		0xffff
@@ -44,7 +45,7 @@
 /* umc ce count initial value */
 #define UMC_V6_1_CE_CNT_INIT	(UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
 
 extern const uint32_t
 	umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
new file mode 100644
index 000000000000..f7ec3fe134e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
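A worked example for the split per-channel strides above: get_umc_6_reg_offset() computes channel_offs * ch_inst + UMC_6_INST_DIST * umc_inst, so with the Arcturus stride (UMC_V6_1_PER_CHANNEL_OFFSET_ARCT = 0x400), umc_inst = 2 and ch_inst = 3 give 0x400 * 3 + 0x40000 * 2 = 0x80c00, while the Vega20 stride (0x800) gives 0x81800 for the same pair. These are dword offsets; they are multiplied by 4 for the byte addressing used by the RREG32_PCIE/WREG32_PCIE accessors throughout this file.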
+ * + */ +#include "umc_v6_7.h" +#include "amdgpu_ras.h" +#include "amdgpu_umc.h" +#include "amdgpu.h" + +#include "umc/umc_6_7_0_offset.h" +#include "umc/umc_6_7_0_sh_mask.h" + +const uint32_t + umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = { + {28, 20, 24, 16, 12, 4, 8, 0}, + {6, 30, 2, 26, 22, 14, 18, 10}, + {19, 11, 15, 7, 3, 27, 31, 23}, + {9, 1, 5, 29, 25, 17, 21, 13} +}; +const uint32_t + umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = { + {19, 11, 15, 7, 3, 27, 31, 23}, + {9, 1, 5, 29, 25, 17, 21, 13}, + {28, 20, 24, 16, 12, 4, 8, 0}, + {6, 30, 2, 26, 22, 14, 18, 10}, +}; + +static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; +} + +static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t ecc_err_cnt, ecc_err_cnt_addr; + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + /* UMC 6_1_1 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); + + /* select the lower chip and check the error count */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 0); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); + *error_count += + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - + UMC_V6_7_CE_CNT_INIT); + + /* select the higher chip and check the err counter */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); + *error_count += + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - + UMC_V6_7_CE_CNT_INIT); + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); + + /* check the MCUMC_STATUS */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +static void 
umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_err_cnt_addr; + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, + regUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, + regUMCCH0_0_EccErrCnt); + + /* select the lower chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 0); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear lower chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V6_7_CE_CNT_INIT); + + /* select the higher chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear higher chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V6_7_CE_CNT_INIT); +} + +static void umc_v6_7_reset_error_count(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v6_7_reset_error_count_per_channel(adev, + umc_reg_offset); + } +} + +static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + /*TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC registers. 
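LOOP_UMC_INST_AND_CH() is defined in amdgpu_umc.h; to a first approximation it expands to the nested walk below over the per-ASIC instance counts, which is why the per-channel helpers only ever see one (umc_inst, ch_inst) pair at a time:

for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++)
	for (ch_inst = 0; ch_inst < adev->umc.channel_inst_num; ch_inst++)
		/* loop body runs once per (umc_inst, ch_inst) pair */;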
Will add the protection */
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+							 umc_inst,
+							 ch_inst);
+		umc_v6_7_query_correctable_error_count(adev,
+						       umc_reg_offset,
+						       &(err_data->ce_count));
+		umc_v6_7_querry_uncorrectable_error_count(adev,
+							  umc_reg_offset,
+							  &(err_data->ue_count));
+	}
+
+	umc_v6_7_reset_error_count(adev);
+}
+
+static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
+					 struct ras_err_data *err_data,
+					 uint32_t umc_reg_offset,
+					 uint32_t ch_inst,
+					 uint32_t umc_inst)
+{
+	uint32_t mc_umc_status_addr;
+	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+	struct eeprom_table_record *err_rec;
+	uint32_t channel_index;
+
+	mc_umc_status_addr =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+	mc_umc_addrt0 =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+	if (mc_umc_status == 0)
+		return;
+
+	if (!err_data->err_addr) {
+		/* clear umc status */
+		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+		return;
+	}
+
+	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+	/* calculate error address if ue/ce error is detected */
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+		/* translate umc channel address to soc pa, 3 parts are included */
+		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+				ADDR_OF_256B_BLOCK(channel_index) |
+				OFFSET_IN_256B_BLOCK(err_addr);
+
+		/* we only save ue error information currently, ce is skipped */
+		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+				== 1) {
+			err_rec->address = err_addr;
+			/* page frame address is saved */
+			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+			err_rec->ts = (uint64_t)ktime_get_real_seconds();
+			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+			err_rec->cu = 0;
+			err_rec->mem_channel = channel_index;
+			err_rec->mcumc_id = umc_inst;
+
+			err_data->err_addr_cnt++;
+		}
+	}
+
+	/* clear umc status */
+	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
+					     void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	/*TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers.
Will add the protection + * when firmware interface is ready */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + umc_v6_7_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + +static uint32_t umc_v6_7_query_ras_poison_mode_per_channel( + struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_ctrl_addr, ecc_ctrl; + + ecc_ctrl_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl); + ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr + + umc_reg_offset) * 4); + + return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn); +} + +static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + /* Enabling fatal error in one channel will be considered + as fatal error mode */ + if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset)) + return false; + } + + return true; +} + +const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = { + .ras_late_init = amdgpu_umc_ras_late_init, + .ras_fini = amdgpu_umc_ras_fini, + .query_ras_error_count = umc_v6_7_query_ras_error_count, + .query_ras_error_address = umc_v6_7_query_ras_error_address, + .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h new file mode 100644 index 000000000000..57f2557e7aca --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h @@ -0,0 +1,52 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
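A minimal sketch (not in this patch) of a consumer of the new query_ras_poison_mode hook, assuming the funcs table is reachable through adev->umc.ras_funcs like the other callbacks:

static bool example_umc_poison_mode(struct amdgpu_device *adev)
{
	/* true only when no channel has UCFatalEn set, per the logic above */
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->query_ras_poison_mode)
		return adev->umc.ras_funcs->query_ras_poison_mode(adev);

	return false;
}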
+ *
+ */
+#ifndef __UMC_V6_7_H__
+#define __UMC_V6_7_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* EccErrCnt max value */
+#define UMC_V6_7_CE_CNT_MAX		0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_7_CE_INT_THRESHOLD	0xffff
+/* umc ce count initial value */
+#define UMC_V6_7_CE_CNT_INIT	(UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD)
+
+#define UMC_V6_7_INST_DIST	0x40000
+
+/* number of umc instances with memory map register access */
+#define UMC_V6_7_UMC_INSTANCE_NUM	4
+/* number of umc channel instances with memory map register access */
+#define UMC_V6_7_CHANNEL_INSTANCE_NUM	8
+/* total channel instances in one umc block */
+#define UMC_V6_7_TOTAL_CHANNEL_NUM	(UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
+/* UMC register per channel offset */
+#define UMC_V6_7_PER_CHANNEL_OFFSET	0x400
+extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+extern const uint32_t
+	umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
+extern const uint32_t
+	umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
new file mode 100644
index 000000000000..af59a35788e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
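A note on the thresholds above: with UMC_V6_7_CE_CNT_MAX and UMC_V6_7_CE_INT_THRESHOLD both 0xffff, UMC_V6_7_CE_CNT_INIT evaluates to 0, so the hardware counters are re-armed at zero and a full 0xffff correctable errors accumulate before the threshold interrupt; the query paths subtract this initial value when converting raw EccErrCnt readings into reported counts.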
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
new file mode 100644
index 000000000000..af59a35788e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+
+#include "rsmu/rsmu_0_0_2_offset.h"
+#include "rsmu/rsmu_0_0_2_sh_mask.h"
+#include "umc/umc_8_7_0_offset.h"
+#include "umc/umc_8_7_0_sh_mask.h"
+
+#define UMC_8_INST_DIST		0x40000
+
+const uint32_t
+	umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM] = {
+	{2, 11},  {4, 13},
+	{1, 8},   {7, 14},
+	{10, 3},  {12, 5},
+	{9, 0},   {15, 6}
+};
+
+static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev,
+					    uint32_t umc_inst,
+					    uint32_t ch_inst)
+{
+	return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst;
+}
+
+static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev,
+					uint32_t umc_reg_offset)
+{
+	uint32_t ecc_err_cnt_addr;
+	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+	ecc_err_cnt_sel_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+	ecc_err_cnt_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+	/* select the lower chip */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+					umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+					UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 0);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+			ecc_err_cnt_sel);
+
+	/* clear lower chip error count */
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+			UMC_V8_7_CE_CNT_INIT);
+
+	/* select the higher chip */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+					umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+					UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 1);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+			ecc_err_cnt_sel);
+
+	/* clear higher chip error count */
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+			UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_clear_error_count(struct amdgpu_device *adev)
+{
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_8_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v8_7_clear_error_count_per_channel(adev,
+						umc_reg_offset);
+	}
+}
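[Editor's note -- not part of the patch] LOOP_UMC_INST_AND_CH comes from amdgpu_umc.h; it is presumably a pair of nested for loops over the instance/channel grid. An equivalent expansion of the iteration used throughout this file, with a worked offset:

uint32_t umc_inst, ch_inst;

for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) {
        for (ch_inst = 0; ch_inst < adev->umc.channel_inst_num; ch_inst++) {
                uint32_t off = adev->umc.channel_offs * ch_inst +
                               UMC_8_INST_DIST * umc_inst;
                /* e.g. umc_inst = 1, ch_inst = 1, channel_offs = 0x400:
                 * off = 0x400 + 0x40000 = 0x40400 (register dwords) */
        }
}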
+static void umc_v8_7_query_correctable_error_count(struct amdgpu_device *adev,
+						   uint32_t umc_reg_offset,
+						   unsigned long *error_count)
+{
+	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+	uint64_t mc_umc_status;
+	uint32_t mc_umc_status_addr;
+
+	/* UMC 8_7_2 registers */
+	ecc_err_cnt_sel_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+	ecc_err_cnt_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+	mc_umc_status_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+	/* select the lower chip and check the error count */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 0);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+	*error_count +=
+		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+		 UMC_V8_7_CE_CNT_INIT);
+
+	/* select the higher chip and check the err counter */
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 1);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+	*error_count +=
+		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+		 UMC_V8_7_CE_CNT_INIT);
+
+	/* check for SRAM correctable error;
+	 * MCUMC_STATUS is a 64 bit register */
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+		*error_count += 1;
+}
+
+static void umc_v8_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+						      uint32_t umc_reg_offset,
+						      unsigned long *error_count)
+{
+	uint64_t mc_umc_status;
+	uint32_t mc_umc_status_addr;
+
+	mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+	/* check the MCUMC_STATUS */
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+		*error_count += 1;
+}
+
+static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev,
+					   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_8_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v8_7_query_correctable_error_count(adev,
+						       umc_reg_offset,
+						       &(err_data->ce_count));
+		umc_v8_7_query_uncorrectable_error_count(adev,
+							 umc_reg_offset,
+							 &(err_data->ue_count));
+	}
+
+	umc_v8_7_clear_error_count(adev);
+}
+
+static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
+					 struct ras_err_data *err_data,
+					 uint32_t umc_reg_offset,
+					 uint32_t ch_inst,
+					 uint32_t umc_inst)
+{
+	uint32_t lsb, mc_umc_status_addr;
+	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+	struct eeprom_table_record *err_rec;
+	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+	mc_umc_status_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+	mc_umc_addrt0 =
+		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+	if (mc_umc_status == 0)
+		return;
+
+	if (!err_data->err_addr) {
+		/* clear umc status */
+		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+		return;
+	}
+
+	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+	/* calculate error address if ue/ce error is detected */
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+		/* the lowest lsb bits should be ignored */
+		lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
+		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+		err_addr &= ~((0x1ULL << lsb) - 1);
+
+		/* translate umc channel address to soc physical address;
+		 * 3 parts are combined */
+		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+				ADDR_OF_256B_BLOCK(channel_index) |
+				OFFSET_IN_256B_BLOCK(err_addr);
+
+		/* we only save ue error information currently, ce is skipped */
+		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+				== 1) {
+			err_rec->address = err_addr;
+			/* page frame address is saved */
+			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+			err_rec->ts = (uint64_t)ktime_get_real_seconds();
+			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+			err_rec->cu = 0;
+			err_rec->mem_channel = channel_index;
+			err_rec->mcumc_id = umc_inst;
+
+			err_data->err_addr_cnt++;
+		}
+	}
+
+	/* clear umc status */
+	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev,
+					     void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_8_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v8_7_query_error_address(adev,
+					     err_data,
+					     umc_reg_offset,
+					     ch_inst,
+					     umc_inst);
+	}
+}
+
+static void umc_v8_7_err_cnt_init_per_channel(struct amdgpu_device *adev,
+					      uint32_t umc_reg_offset)
+{
+	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+	uint32_t ecc_err_cnt_addr;
+
+	ecc_err_cnt_sel_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+	ecc_err_cnt_addr =
+		SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+	/* select the lower chip and check the error count */
+	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 0);
+	/* set ce error interrupt type to APIC based interrupt */
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+					GeccErrInt, 0x1);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+	/* set error count to initial value */
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+
+	/* select the higher chip and check the err counter */
+	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+					GeccErrCntCsSel, 1);
+	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
+{
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_8_reg_offset(adev,
+						      umc_inst,
+						      ch_inst);
+
+		umc_v8_7_err_cnt_init_per_channel(adev, umc_reg_offset);
+	}
+}
+
+const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
+	.err_cnt_init = umc_v8_7_err_cnt_init,
+	.ras_late_init = amdgpu_umc_ras_late_init,
+	.ras_fini = amdgpu_umc_ras_fini,
+	.query_ras_error_count = umc_v8_7_query_ras_error_count,
+	.query_ras_error_address = umc_v8_7_query_ras_error_address,
+};
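[Editor's note -- not part of the patch] A worked example of the three-part address assembly in umc_v8_7_query_error_address(), assuming the macro definitions in amdgpu_umc.h (ADDR_OF_4KB_BLOCK(a) = (a & ~0xff) << 4, ADDR_OF_256B_BLOCK(c) = c << 8, OFFSET_IN_256B_BLOCK(a) = a & 0xff):

uint64_t err_addr = 0x12345;    /* hypothetical normalized UMC address */
uint32_t channel_index = 11;    /* looked up in umc_v8_7_channel_idx_tbl */
uint64_t retired_page =
        ((err_addr & ~0xffULL) << 4) |          /* 0x123000: bits above the channel slot */
        ((uint64_t)channel_index << 8) |        /* 0xb00: channel slot at bits 8..11 */
        (err_addr & 0xffULL);                   /* 0x45: offset inside the 256B block */
/* retired_page == 0x123b45; the eeprom record stores
 * retired_page >> AMDGPU_GPU_PAGE_SHIFT */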
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
new file mode 100644
index 000000000000..37e6dc7c28e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_7_H__
+#define __UMC_V8_7_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* HBM Memory Channel Width */
+#define UMC_V8_7_HBM_MEMORY_CHANNEL_WIDTH	128
+/* number of umc channel instance with memory map register access */
+#define UMC_V8_7_CHANNEL_INSTANCE_NUM		2
+/* number of umc instance with memory map register access */
+#define UMC_V8_7_UMC_INSTANCE_NUM		8
+/* total channel instances in one umc block */
+#define UMC_V8_7_TOTAL_CHANNEL_NUM	(UMC_V8_7_CHANNEL_INSTANCE_NUM * UMC_V8_7_UMC_INSTANCE_NUM)
+/* UMC register per channel offset */
+#define UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA	0x400
+
+/* EccErrCnt max value */
+#define UMC_V8_7_CE_CNT_MAX		0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_7_CE_INT_THRESHOLD	0xffff
+/* umc ce count initial value */
+#define UMC_V8_7_CE_CNT_INIT	(UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
+
+extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
+extern const uint32_t
+	umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
new file mode 100644
index 000000000000..0fef925b6602
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Sonny Jiang <sonny.jiang@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "sid.h"
+
+#include "uvd/uvd_3_1_d.h"
+#include "uvd/uvd_3_1_sh_mask.h"
+
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+
+/**
+ * uvd_v3_1_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32(mmUVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v3_1_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32(mmUVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v3_1_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v3_1_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job associated with the indirect buffer
+ * @ib: indirect buffer to execute
+ * @flags: flags associated with the indirect buffer
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
+				  struct amdgpu_ib *ib,
+				  uint32_t flags)
+{
+	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
+	amdgpu_ring_write(ring, ib->gpu_addr);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
+	amdgpu_ring_write(ring, ib->length_dw);
+}
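[Editor's note -- not part of the patch] Every emit helper in this file uses the same two-dword idiom: a PACKET0 header naming a register, followed by the value to write into it. Factored out as a sketch (the helper name is hypothetical):

static void example_emit_wreg(struct amdgpu_ring *ring,
                              uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET0(reg, 0));   /* header: write 1 dword at reg */
        amdgpu_ring_write(ring, val);               /* payload */
}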
+/**
+ * uvd_v3_1_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				     unsigned flags)
+{
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, addr & 0xffffffff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+	amdgpu_ring_write(ring, 2);
+}
+
+/**
+ * uvd_v3_1_ring_test_ring - register write test
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
+	r = amdgpu_ring_alloc(ring, 3);
+	if (r)
+		return r;
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(mmUVD_CONTEXT_ID);
+		if (tmp == 0xDEADBEEF)
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+	return r;
+}
+
+static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_UVD,
+	.align_mask = 0xf,
+	.support_64bit_ptrs = false,
+	.no_user_fence = true,
+	.get_rptr = uvd_v3_1_ring_get_rptr,
+	.get_wptr = uvd_v3_1_ring_get_wptr,
+	.set_wptr = uvd_v3_1_ring_set_wptr,
+	.parse_cs = amdgpu_uvd_ring_parse_cs,
+	.emit_frame_size =
+		14, /* uvd_v3_1_ring_emit_fence x1 no user fence */
+	.emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
+	.emit_ib = uvd_v3_1_ring_emit_ib,
+	.emit_fence = uvd_v3_1_ring_emit_fence,
+	.test_ring = uvd_v3_1_ring_test_ring,
+	.test_ib = amdgpu_uvd_ring_test_ib,
+	.insert_nop = uvd_v3_1_ring_insert_nop,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_uvd_ring_begin_use,
+	.end_use = amdgpu_uvd_ring_end_use,
+};
+
+static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
+}
+
+static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
+			     bool sw_mode)
+{
+	u32 tmp, tmp2;
+
+	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);
+
+	tmp = RREG32(mmUVD_CGC_CTRL);
+	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
+		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);
+
+	if (sw_mode) {
+		tmp &= ~0x7ffff800;
+		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
+			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
+			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
+	} else {
+		tmp |= 0x7ffff800;
+		tmp2 = 0;
+	}
+
+	WREG32(mmUVD_CGC_CTRL, tmp);
+	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
+}
+
+/**
+ * uvd_v3_1_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
+{
+	uint64_t addr;
+	uint32_t size;
+
+	/* program the VCPU memory controller bits 0-27 */
+	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
+	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
+	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
+	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = AMDGPU_UVD_HEAP_SIZE >> 3;
+	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
+	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = (AMDGPU_UVD_STACK_SIZE +
+		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
+	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
+	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+	/* bits 28-31 */
+	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
+	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+	/* bits 32-39 */
+	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
+	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+}
+
+/**
+ * uvd_v3_1_fw_validate - FW validation operation
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initiate and check UVD firmware validation.
+ */
+static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t keysel = adev->uvd.keyselect;
+
+	WREG32(mmUVD_FW_START, keysel);
+
+	for (i = 0; i < 10; ++i) {
+		mdelay(10);
+		if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
+			break;
+	}
+
+	if (i == 10)
+		return -ETIMEDOUT;
+
+	if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
+		return -EINVAL;
+
+	for (i = 0; i < 10; ++i) {
+		mdelay(10);
+		if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
+			break;
+	}
+
+	if (i == 10)
+		return -ETIMEDOUT;
+
+	return 0;
+}
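[Editor's note -- not part of the patch] uvd_v3_1_fw_validate() polls UVD_FW_STATUS twice with the same 10 x 10ms pattern. The shared idiom, sketched as a hypothetical helper:

static int example_poll_fw_status(struct amdgpu_device *adev,
                                  uint32_t mask, bool want_set)
{
        int i;

        for (i = 0; i < 10; ++i) {
                mdelay(10);
                if (!!(RREG32(mmUVD_FW_STATUS) & mask) == want_set)
                        return 0;
        }
        return -ETIMEDOUT;      /* 100ms without reaching the expected state */
}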
+/**
+ * uvd_v3_1_start - start UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the UVD block
+ */
+static int uvd_v3_1_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+	uint32_t rb_bufsz;
+	int i, j, r;
+	u32 tmp;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
+
+	/* set uvd busy */
+	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
+
+	uvd_v3_1_set_dcm(adev, true);
+	WREG32(mmUVD_CGC_GATE, 0);
+
+	/* take UVD block out of reset */
+	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	mdelay(5);
+
+	/* enable VCPU clock */
+	WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+
+	/* disable interrupt */
+	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+	/* initialize UVD memory controller */
+	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+		(1 << 21) | (1 << 9) | (1 << 20));
+
+	tmp = RREG32(mmUVD_MPC_CNTL);
+	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
+
+	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
+	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
+	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
+	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
+	WREG32(mmUVD_MPC_SET_ALU, 0);
+	WREG32(mmUVD_MPC_SET_MUX, 0x88);
+
+	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
+	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
+
+	/* enable UMC */
+	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
+	mdelay(10);
+
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		mdelay(10);
+		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+		mdelay(10);
+		r = -1;
+	}
+
+	if (r) {
+		DRM_ERROR("UVD not responding, giving up!!!\n");
+		return r;
+	}
+
+	/* enable interrupt */
+	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
+
+	/* force RBC into idle state */
+	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Set the write pointer delay */
+	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+	/* program the 4GB memory segment for rptr and ring buffer */
+	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+		(0x7 << 16) | (0x1 << 31));
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(mmUVD_RBC_RB_RPTR, 0x0);
+
+	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
+	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+
+	/* set the ring address */
+	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
+
+	/* Set ring buffer size */
+	rb_bufsz = order_base_2(ring->ring_size);
+	rb_bufsz = (0x1 << 8) | rb_bufsz;
+	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+	return 0;
+}
+
+/**
+ * uvd_v3_1_stop - stop UVD block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the UVD block
+ */
+static void uvd_v3_1_stop(struct amdgpu_device *adev)
+{
+	uint32_t i, j;
+	uint32_t status;
+
+	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(1);
+		}
+		if (status & 2)
+			break;
+	}
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
+			if (status & 0xf)
+				break;
+			mdelay(1);
+		}
+		if (status & 0xf)
+			break;
+	}
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+	for (i = 0; i < 10; ++i) {
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(mmUVD_LMI_STATUS);
+			if (status & 0x240)
+				break;
+			mdelay(1);
+		}
+		if (status & 0x240)
+			break;
+	}
+
+	WREG32_P(0x3D49, 0, ~(1 << 2));
+
+	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+	WREG32(mmUVD_STATUS, 0);
+
+	uvd_v3_1_set_dcm(adev, false);
+}
+
+static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: UVD TRAP\n");
+	amdgpu_fence_process(&adev->uvd.inst->ring);
+	return 0;
+}
+
+
+static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
+	.set = uvd_v3_1_set_interrupt_state,
+	.process = uvd_v3_1_process_interrupt,
+};
+
+static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->uvd.inst->irq.num_types = 1;
+	adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
+}
+
+
+static int uvd_v3_1_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	adev->uvd.num_uvd_inst = 1;
+
+	uvd_v3_1_set_ring_funcs(adev);
+	uvd_v3_1_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int uvd_v3_1_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+	void *ptr;
+	uint32_t ucode_len;
+
+	/* UVD TRAP */
+	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_sw_init(adev);
+	if (r)
+		return r;
+
+	ring = &adev->uvd.inst->ring;
+	sprintf(ring->name, "uvd");
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT, NULL);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
+	/* Retrieve the firmware validation key */
+	ptr = adev->uvd.inst[0].cpu_addr;
+	ptr += 192 + 16;
+	memcpy(&ucode_len, ptr, 4);
+	ptr += ucode_len;
+	memcpy(&adev->uvd.keyselect, ptr, 4);
+
+	r = amdgpu_uvd_entity_init(adev);
+
+	return r;
+}
+
+static int uvd_v3_1_sw_fini(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_uvd_suspend(adev);
+	if (r)
+		return r;
+
+	return amdgpu_uvd_sw_fini(adev);
+}
+
+static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
+				 bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
+		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+		data |= 0x3fff;
+		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+		orig = data = RREG32(mmUVD_CGC_CTRL);
+		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+		if (orig != data)
+			WREG32(mmUVD_CGC_CTRL, data);
+	} else {
+		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
+		data &= ~0x3fff;
+		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
+
+		orig = data = RREG32(mmUVD_CGC_CTRL);
+		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+		if (orig != data)
+			WREG32(mmUVD_CGC_CTRL, data);
+	}
+}
+
+/**
+ * uvd_v3_1_hw_init - start and test UVD block
+ *
+ * @handle: handle used to pass amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int uvd_v3_1_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
+	uint32_t tmp;
+	int r;
+
+	uvd_v3_1_mc_resume(adev);
+
+	r = uvd_v3_1_fw_validate(adev);
+	if (r) {
+		DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r);
+		return r;
+	}
+
+	uvd_v3_1_enable_mgcg(adev, true);
+	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+
+	uvd_v3_1_start(adev);
+
+	r = amdgpu_ring_test_helper(ring);
+	if (r) {
+		DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
+		goto done;
+	}
+
+	r = amdgpu_ring_alloc(ring, 10);
+	if (r) {
+		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
+		goto done;
+	}
+
+	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+	amdgpu_ring_write(ring, tmp);
+	amdgpu_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+	amdgpu_ring_write(ring, tmp);
+	amdgpu_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+	amdgpu_ring_write(ring, tmp);
+	amdgpu_ring_write(ring, 0xFFFFF);
+
+	/* Clear timeout status bits */
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
+	amdgpu_ring_write(ring, 3);
+
+	amdgpu_ring_commit(ring);
+
+done:
+	if (!r)
+		DRM_INFO("UVD initialized successfully.\n");
+
+	return r;
+}
+
+/**
+ * uvd_v3_1_hw_fini - stop the hardware block
+ *
+ * @handle: handle used to pass amdgpu_device pointer
+ *
+ * Stop the UVD block, mark ring as not ready any more
+ */
+static int uvd_v3_1_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+	if (RREG32(mmUVD_STATUS) != 0)
+		uvd_v3_1_stop(adev);
+
+	return 0;
+}
+
+static int uvd_v3_1_suspend(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/*
+	 * Proper cleanups before halting the HW engine:
+	 * - cancel the delayed idle work
+	 * - enable powergating
+	 * - enable clockgating
+	 * - disable dpm
+	 *
+	 * TODO: to align with the VCN implementation, move the
+	 * jobs for clockgating/powergating/dpm setting to
+	 * ->set_powergating_state().
+	 */
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+	if (adev->pm.dpm_enabled) {
+		amdgpu_dpm_enable_uvd(adev, false);
+	} else {
+		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+		/* shutdown the UVD block */
+		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						       AMD_PG_STATE_GATE);
+		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+						       AMD_CG_STATE_GATE);
+	}
+
+	r = uvd_v3_1_hw_fini(adev);
+	if (r)
+		return r;
+
+	return amdgpu_uvd_suspend(adev);
+}
+
+static int uvd_v3_1_resume(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
+	return uvd_v3_1_hw_init(adev);
+}
+
+static bool uvd_v3_1_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
+}
+
+static int uvd_v3_1_wait_for_idle(void *handle)
+{
+	unsigned i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+			return 0;
+	}
+	return -ETIMEDOUT;
+}
+
+static int uvd_v3_1_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	uvd_v3_1_stop(adev);
+
+	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
+		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+	mdelay(5);
+
+	return uvd_v3_1_start(adev);
+}
+
+static int uvd_v3_1_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int uvd_v3_1_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
+	.name = "uvd_v3_1",
+	.early_init = uvd_v3_1_early_init,
+	.late_init = NULL,
+	.sw_init = uvd_v3_1_sw_init,
+	.sw_fini = uvd_v3_1_sw_fini,
+	.hw_init = uvd_v3_1_hw_init,
+	.hw_fini = uvd_v3_1_hw_fini,
+	.suspend = uvd_v3_1_suspend,
+	.resume = uvd_v3_1_resume,
+	.is_idle = uvd_v3_1_is_idle,
+	.wait_for_idle = uvd_v3_1_wait_for_idle,
+	.soft_reset = uvd_v3_1_soft_reset,
+	.set_clockgating_state = uvd_v3_1_set_clockgating_state,
+	.set_powergating_state = uvd_v3_1_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version uvd_v3_1_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_UVD,
+	.major = 3,
+	.minor = 1,
+	.rev = 0,
+	.funcs = &uvd_v3_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.h b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.h
new file mode 100644
index 000000000000..8c2f9b207574
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __UVD_V3_1_H__
+#define __UVD_V3_1_H__
+
+extern const struct amdgpu_ip_block_version uvd_v3_1_ip_block;
+
+#endif
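[Editor's note -- not part of the patch] The same "proper cleanups" sequence is duplicated in every UVD/VCE suspend (or hw_fini) path touched below. Collected into one hypothetical helper, grounded entirely in the code this patch adds, the pattern is:

static void example_uvd_suspend_prepare(struct amdgpu_device *adev)
{
        /* stop the delayed idle handler first so it cannot race suspend */
        cancel_delayed_work_sync(&adev->uvd.idle_work);

        if (adev->pm.dpm_enabled) {
                amdgpu_dpm_enable_uvd(adev, false);
        } else {
                amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                /* shutdown the UVD block */
                amdgpu_device_ip_set_powergating_state(adev,
                                AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE);
                amdgpu_device_ip_set_clockgating_state(adev,
                                AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE);
        }
}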
*/ @@ -504,7 +530,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) * uvd_v4_2_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: iob associated with the indirect buffer * @ib: indirect buffer to execute + * @flags: flags associated with the indirect buffer * * Write ring commands to execute the indirect buffer */ @@ -543,7 +571,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) uint64_t addr; uint32_t size; - /* programm the VCPU memory controller bits 0-27 */ + /* program the VCPU memory controller bits 0-27 */ addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 01e62fb8e6e0..563493d1f830 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) /** * uvd_v5_0_hw_init - start and test UVD block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * * Initialize the hardware, boot up the VCPU and do some testing */ @@ -201,20 +202,19 @@ done: /** * uvd_v5_0_hw_fini - stop the hardware block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * * Stop the UVD block, mark ring as not ready any more */ static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + + cancel_delayed_work_sync(&adev->uvd.idle_work); if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); - ring->sched.ready = false; - return 0; } @@ -223,10 +223,33 @@ static int uvd_v5_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
+ */ + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_uvd(adev, false); + } else { + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + /* shutdown the UVD block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); + } + r = uvd_v5_0_hw_fini(adev); if (r) return r; - uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); return amdgpu_uvd_suspend(adev); } @@ -255,7 +278,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) uint64_t offset; uint32_t size; - /* programm memory controller bits 0-27 */ + /* program memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, @@ -406,7 +429,7 @@ static int uvd_v5_0_start(struct amdgpu_device *adev) /* set the wb address */ WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -456,7 +479,9 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev) * uvd_v5_0_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write a fence and a trap command to the ring. */ @@ -520,7 +545,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) * uvd_v5_0_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write ring commands to execute the indirect buffer */ @@ -763,7 +790,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 217084d56ab8..d5d023a24269 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -198,9 +198,9 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) /** * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: session handle to use + * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Open up a stream for HW test @@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -260,9 +261,9 @@ err: /** * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: session handle to use + * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Close up a stream for HW test or if userspace failed to do so @@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -324,20 +326,15 @@ err: * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working * * @ring: the engine to test on + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * */ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -354,8 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -416,7 +411,8 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -428,7 +424,9 @@ static int uvd_v6_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -459,7 +457,7 @@ static int uvd_v6_0_sw_fini(void *handle) /** * uvd_v6_0_hw_init - start and test UVD block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * * Initialize the hardware, boot up the VCPU and do some testing */ @@ -528,20 +526,41 @@ done: /** * uvd_v6_0_hw_fini - stop the hardware block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * * Stop 
the UVD block, mark ring as not ready any more */ static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). + */ + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_uvd(adev, false); + } else { + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + /* shutdown the UVD block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); + } if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); - ring->sched.ready = false; - return 0; } @@ -581,7 +600,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) uint64_t offset; uint32_t size; - /* programm memory controller bits 0-27 */ + /* program memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, @@ -823,7 +842,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* set the wb address */ WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -889,7 +908,9 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev) * uvd_v6_0_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write a fence and a trap command to the ring. */ @@ -919,7 +940,9 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write enc a fence and a trap command to the ring. */ @@ -984,7 +1007,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) * uvd_v6_0_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write ring commands to execute the indirect buffer */ @@ -1010,7 +1035,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrive vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write enc ring commands to execute the indirect buffer */ @@ -1238,8 +1265,8 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, break; } - if (false == int_handled) - DRM_ERROR("Unhandled interrupt: %d %d\n", + if (!int_handled) + DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); return 0; @@ -1421,7 +1448,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0995378d8263..b483f03b4591 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -36,7 +36,6 @@ #include "vce/vce_4_0_default.h" #include "vce/vce_4_0_sh_mask.h" #include "nbif/nbif_6_1_offset.h" -#include "hdp/hdp_4_0_offset.h" #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h" @@ -206,9 +205,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) /** * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: session handle to use + * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Open up a stream for HW test @@ -224,7 +223,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -268,9 +268,9 @@ err: /** * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg * - * @adev: amdgpu_device pointer * @ring: ring we should submit the msg to * @handle: session handle to use + * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Close up a stream for HW test or if userspace failed to do so @@ -286,7 +286,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -331,20 +332,15 @@ err: * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working * * @ring: the engine to test on + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * */ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -361,8 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -450,7 +444,9 @@ static int uvd_v7_0_sw_init(void *handle) if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; sprintf(ring->name, "uvd_%d", ring->me); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -469,7 +465,9 @@ static int uvd_v7_0_sw_init(void *handle) else ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->uvd.inst[j].irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; } @@ -513,7 +511,7 @@ static int uvd_v7_0_sw_fini(void *handle) /** * uvd_v7_0_hw_init - start and test UVD block * - * @adev: amdgpu_device pointer + * 
@handle: handle used to pass amdgpu_device pointer * * Initialize the hardware, boot up the VCPU and do some testing */ @@ -591,14 +589,15 @@ done: /** * uvd_v7_0_hw_fini - stop the hardware block * - * @adev: amdgpu_device pointer + * @handle: handle used to pass amdgpu_device pointer * * Stop the UVD block, mark ring as not ready any more */ static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + + cancel_delayed_work_sync(&adev->uvd.idle_work); if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -607,12 +606,6 @@ static int uvd_v7_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { - if (adev->uvd.harvest_config & (1 << i)) - continue; - adev->uvd.inst[i].ring.sched.ready = false; - } - return 0; } @@ -621,6 +614,30 @@ static int uvd_v7_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). + */ + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_uvd(adev, false); + } else { + amdgpu_asic_set_uvd_clocks(adev, 0, 0); + /* shutdown the UVD block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); + } + r = uvd_v7_0_hw_fini(adev); if (r) return r; @@ -1074,7 +1091,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -1148,7 +1165,9 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev) * uvd_v7_0_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write a fence and a trap command to the ring. */ @@ -1187,7 +1206,9 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write enc a fence and a trap command to the ring. 
*/ @@ -1283,7 +1304,9 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, * uvd_v7_0_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write ring commands to execute the indirect buffer */ @@ -1314,7 +1337,9 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrive vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write enc ring commands to execute the indirect buffer */ @@ -1376,7 +1401,7 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for reg writes */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask); @@ -1418,7 +1443,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for reg writes */ - uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2, + uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + + vmid * hub->ctx_addr_distance, lower_32_bits(pd_addr), 0xffffffff); } @@ -1694,7 +1720,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); uvd_v7_0_set_bypass_mode(adev, enable); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index b6837fcfdba7..67eb01fef789 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); - r = amdgpu_ring_init(adev, ring, 512, - &adev->vce.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + hw_prio, NULL); if (r) return r; } @@ -477,6 +479,10 @@ static int vce_v2_0_hw_init(void *handle) static int vce_v2_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + cancel_delayed_work_sync(&adev->vce.idle_work); + return 0; } @@ -485,6 +491,30 @@ static int vce_v2_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + r = vce_v2_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 475ae68f38f5..142e291983b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -440,9 +440,12 @@ static int vce_v3_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + hw_prio, NULL); if (r) return r; } @@ -489,6 +492,8 @@ static int vce_v3_0_hw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vce.idle_work); + r = vce_v3_0_wait_for_idle(handle); if (r) return r; @@ -502,6 +507,29 @@ static int vce_v3_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). + */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + r = vce_v3_0_hw_fini(adev); if (r) return r; @@ -739,7 +767,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 683701cf7270..d1fc4e0b8265 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -25,6 +25,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vce.h" @@ -462,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle) } for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { @@ -476,7 +479,8 @@ static int vce_v4_0_sw_init(void *handle) else ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, + hw_prio, NULL); if (r) return r; } @@ -539,7 +543,8 @@ static int vce_v4_0_hw_init(void *handle) static int vce_v4_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + + cancel_delayed_work_sync(&adev->vce.idle_work); if (!amdgpu_sriov_vf(adev)) { /* vce_v4_0_wait_for_idle(handle); */ @@ -549,25 +554,48 @@ static int vce_v4_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - for (i = 0; i < adev->vce.num_rings; i++) - adev->vce.ring[i].sched.ready = false; - return 0; } static int vce_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return 0; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); - void *ptr = adev->vce.cpu_addr; + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); + void *ptr = adev->vce.cpu_addr; - memcpy_fromio(adev->vce.saved_bo, ptr, size); + memcpy_fromio(adev->vce.saved_bo, ptr, size); + } + drm_dev_exit(idx); + } + + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
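
The vce_v4_0 suspend/resume hunks here wrap their firmware save/restore copies in drm_dev_enter()/drm_dev_exit(), so the VRAM access is skipped entirely if the device has already been hot-unplugged. The guard pattern in isolation, with error handling trimmed; all names are taken from the surrounding hunks:

	/*
	 * drm_dev_enter() succeeds only while the DRM device is still
	 * registered; inside the section the device cannot be torn down
	 * from under us, so the memcpy_fromio() is safe.
	 */
	int idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);

		memcpy_fromio(adev->vce.saved_bo, adev->vce.cpu_addr, size);
		drm_dev_exit(idx);
	}
	/* if drm_dev_enter() failed, the device is gone; skip the copy */
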
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); } r = vce_v4_0_hw_fini(adev); @@ -580,16 +608,20 @@ static int vce_v4_0_suspend(void *handle) static int vce_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); - void *ptr = adev->vce.cpu_addr; - memcpy_toio(ptr, adev->vce.saved_bo, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); + void *ptr = adev->vce.cpu_addr; + + memcpy_toio(ptr, adev->vce.saved_bo, size); + drm_dev_exit(idx); + } } else { r = amdgpu_vce_resume(adev); if (r) @@ -887,7 +919,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if ((adev->asic_type == CHIP_POLARIS10) || @@ -994,7 +1026,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for reg writes */ - vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2, + vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + + vmid * hub->ctx_addr_distance, lower_32_bits(pd_addr), 0xffffffff); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index b4f84a820a44..d54d720b3cf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -25,32 +25,35 @@ #include "amdgpu.h" #include "amdgpu_vcn.h" +#include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" #include "soc15_common.h" #include "vcn/vcn_1_0_offset.h" #include "vcn/vcn_1_0_sh_mask.h" -#include "hdp/hdp_4_0_offset.h" #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" +#include "jpeg_v1_0.h" +#include "vcn_v1_0.h" -#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab -#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 -#define mmUVD_REG_XX_MASK 0x05ac -#define mmUVD_REG_XX_MASK_BASE_IDX 1 +#define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab +#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1 +#define mmUVD_REG_XX_MASK_1_0 0x05ac +#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1 static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); + int inst_idx, struct dpg_pause_state *new_state); + +static void vcn_v1_0_idle_work_handler(struct work_struct *work); +static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); /** * vcn_v1_0_early_init - 
set function pointers @@ -63,14 +66,14 @@ static int vcn_v1_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v1_0_set_dec_ring_funcs(adev); vcn_v1_0_set_enc_ring_funcs(adev); - vcn_v1_0_set_jpeg_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); + jpeg_v1_0_early_init(handle); + return 0; } @@ -101,24 +104,14 @@ static int vcn_v1_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - DRM_INFO("PSP loading VCN firmware\n"); - } + /* Override the work func */ + adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; + + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -126,7 +119,8 @@ static int vcn_v1_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_dec; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -142,24 +136,21 @@ static int vcn_v1_0_sw_init(void *handle) SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + hw_prio, NULL); if (r) return r; } - ring = &adev->vcn.inst->ring_jpeg; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch = - SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); - return 0; + r = jpeg_v1_0_sw_init(handle); + + return r; } /** @@ -178,6 +169,8 @@ static int vcn_v1_0_sw_fini(void *handle) if (r) return r; + jpeg_v1_0_sw_fini(handle); + r = amdgpu_vcn_sw_fini(adev); return r; @@ -207,7 +200,7 @@ static int vcn_v1_0_hw_init(void *handle) goto done; } - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -230,13 +223,14 @@ done: static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; + + cancel_delayed_work_sync(&adev->vcn.idle_work); if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || - RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, 0, mmUVD_STATUS))) { vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - - ring->sched.ready = false; + } return 0; } @@ -364,68 +358,68 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + 
WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0, 0xFFFFFFFF, 0); offset = 0; } else { - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); offset = size; - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0); } - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0); /* cache window 1: stack */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, 0xFFFFFFFF, 0); /* cache window 2: context */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, 0xFFFFFFFF, 0); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, 
adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); } @@ -433,7 +427,6 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) * vcn_v1_0_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -560,7 +553,6 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) * vcn_v1_0_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ @@ -640,9 +632,9 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); - WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); /* enable sw clock gating control */ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -671,22 +663,21 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__SCPU_MODE_MASK); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); /* turn off clock gating */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); /* turn on SUVD clock gating */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel); } static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; - int ret; if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT @@ -702,7 +693,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) | 2 << 
UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF); } else { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT @@ -716,7 +707,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF); } /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */ @@ -732,7 +723,6 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; - int ret; if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */ @@ -767,12 +757,12 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT); - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF); } } /** - * vcn_v1_0_start - start VCN block + * vcn_v1_0_start_spg_mode - start VCN block * * @adev: amdgpu_device pointer * @@ -838,9 +828,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) vcn_v1_0_mc_resume_spg_mode(adev); - WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10); - WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, - RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3); + WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10); + WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0, + RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3); /* enable VCPU clock */ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); @@ -915,7 +905,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -947,22 +937,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); - ring = &adev->vcn.inst->ring_jpeg; - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | - UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - - /* initialize wptr */ - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - 
vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 0); return 0; } @@ -991,14 +966,14 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0); /* disable interupt */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN, 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0); /* initialize VCN memory controller */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL, (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | @@ -1012,48 +987,48 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) /* swap (8 in 32) RB and IB */ lmi_swap_cntl = 0xa; #endif - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL, 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0, ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0, ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX, ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0); vcn_v1_0_mc_resume_dpg_mode(adev); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0); /* boot up the VCPU */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0); /* enable UMC */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2, 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0xFFFFFFFF, 0); /* enable master interrupt */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN, UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0); vcn_v1_0_clock_gating_dpg_mode(adev, 1); /* setup mmUVD_LMI_CTRL */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL, (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | @@ -1065,11 +1040,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device 
*adev) tmp = adev->gfx.config.gb_addr_config; /* setup VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); /* enable System Interrupt for JRBC */ - WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN, + WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN, UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1); /* force RBC into idle state */ @@ -1088,7 +1063,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -1106,13 +1081,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - /* initialize JPEG wptr */ - ring = &adev->vcn.inst->ring_jpeg; - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 1); return 0; } @@ -1129,7 +1098,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) } /** - * vcn_v1_0_stop - stop VCN block + * vcn_v1_0_stop_spg_mode - stop VCN block * * @adev: amdgpu_device pointer * @@ -1137,24 +1106,24 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) */ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) { - int ret_code, tmp; + int tmp; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | UVD_LMI_STATUS__READ_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp); - /* put VCPU into reset */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + /* stall UMC channel */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp); /* disable VCPU clock */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, @@ -1169,6 +1138,11 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) UVD_SOFT_RESET__LMI_SOFT_RESET_MASK, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); + /* put VCPU into reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0); vcn_v1_0_enable_clock_gating(adev); @@ -1178,30 +1152,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) { - int ret_code = 0; uint32_t tmp; /* Wait for power status to 
be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF); tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF); tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, @@ -1223,7 +1196,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) } static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state) + int inst_idx, struct dpg_pause_state *new_state) { int ret_code; uint32_t reg_data = 0; @@ -1231,9 +1204,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, struct amdgpu_ring *ring; /* pause/unpause if state is changed */ - if (adev->vcn.pause_state.fw_based != new_state->fw_based) { + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", - adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg, + adev->vcn.inst[inst_idx].pause_state.fw_based, + adev->vcn.inst[inst_idx].pause_state.jpeg, new_state->fw_based, new_state->jpeg); reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & @@ -1243,9 +1217,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, ret_code = 0; if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK)) - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { /* pause DPG non-jpeg */ @@ -1253,7 +1227,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, - UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); /* Restore */ ring = &adev->vcn.inst->ring_enc[0]; @@ -1275,20 +1249,21 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } } else { /* unpause dpg non-jpeg, no need to wait */ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); } - adev->vcn.pause_state.fw_based = new_state->fw_based; + 
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; } /* pause/unpause if state is changed */ - if (adev->vcn.pause_state.jpeg != new_state->jpeg) { + if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) { DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", - adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg, + adev->vcn.inst[inst_idx].pause_state.fw_based, + adev->vcn.inst[inst_idx].pause_state.jpeg, new_state->fw_based, new_state->jpeg); reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & @@ -1298,9 +1273,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, ret_code = 0; if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK)) - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { /* Make sure JPRG Snoop is disabled before sending the pause */ @@ -1313,10 +1288,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, - UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code); + UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK); /* Restore */ - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | @@ -1335,14 +1310,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } } else { /* unpause dpg jpeg, no need to wait */ reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); } - adev->vcn.pause_state.jpeg = new_state->jpeg; + adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg; } return 0; @@ -1358,10 +1333,10 @@ static bool vcn_v1_0_is_idle(void *handle) static int vcn_v1_0_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; + int ret; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE, ret); + ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); return ret; } @@ -1370,11 +1345,11 @@ static int vcn_v1_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ - if (vcn_v1_0_is_idle(handle)) + if (!vcn_v1_0_is_idle(handle)) return -EBUSY; vcn_v1_0_enable_clock_gating(adev); } else { @@ -1469,7 +1444,9 @@ static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring) * vcn_v1_0_dec_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write a fence and a trap command to the ring. 
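
A recurring mechanical change in the vcn_v1_0 hunks around this point: SOC15_WAIT_ON_RREG() used to hand its poll result back through a caller-supplied variable and now evaluates to the result itself. Both call shapes appear in the diff; the error value is assumed to be a negative errno such as -ETIMEDOUT, since the macro body is not part of this diff:

	/* old: result smuggled out through the trailing out-parameter */
	int ret;
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
			   UVD_STATUS__IDLE, ret);
	if (ret)
		return ret;

	/* new: the macro is an expression, usable directly in assignments */
	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);
	if (ret)
		return ret;

The conversion also removes several uninitialized-use hazards visible above, where wait sites previously ignored ret_code entirely.
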
*/ @@ -1508,7 +1485,9 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write ring commands to execute the indirect buffer */ @@ -1564,7 +1543,7 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); @@ -1643,7 +1622,9 @@ static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring) * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write enc a fence and a trap command to the ring. */ @@ -1668,7 +1649,9 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write enc ring commands to execute the indirect buffer */ @@ -1704,7 +1687,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for reg writes */ - vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2, + vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + + vmid * hub->ctx_addr_distance, lower_32_bits(pd_addr), 0xffffffff); } @@ -1716,389 +1700,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } - -/** - * vcn_v1_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v1_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v1_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); -} - -/** - * vcn_v1_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring.
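
Every emit_vm_flush hunk in this series makes the same substitution: the stride between per-VMID page-table-base registers is read from the vmhub description instead of being hard-coded as two dwords. Shown in isolation, with names taken from the surrounding code:

	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t reg;

	/* before: assumes LO32/HI32 register pairs, i.e. a fixed stride of 2 */
	reg = hub->ctx0_ptb_addr_lo32 + vmid * 2;

	/* after: the hub definition supplies the actual register stride */
	reg = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;

One emit path can then serve hubs whose context registers are laid out differently, which is presumably why the change lands together with the new ASIC variants elsewhere in this series.
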
- */ -static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v1_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. - */ -static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - struct amdgpu_device *adev = ring->adev; - - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0xffffffff); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - /* emit trap */ - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect 
buffer. - */ -static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - struct amdgpu_device *adev = ring->adev; - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, - 
uint32_t reg, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - -static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[(*ptr)++] = 0; - ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); - } else { - ring->ring[(*ptr)++] = reg_offset; - ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); - } - ring->ring[(*ptr)++] = val; -} - -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) -{ - struct amdgpu_device *adev = ring->adev; - - uint32_t reg, reg_offset, val, mask, i; - - // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); - reg_offset = (reg << 2); - val = lower_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); - reg_offset = (reg << 2); - val = upper_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 3rd to 5th: issue MEM_READ commands - for (i = 0; i <= 2; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); - ring->ring[ptr++] = 0; - } - - // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x13; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 7th: program mmUVD_JRBC_RB_REF_DATA - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA); - reg_offset = (reg << 2); - val = 0x1; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x1; - mask = 0x1; - - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = 0x01400200; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = val; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[ptr++] = 0; - ring->ring[ptr++] = 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); - } else { - ring->ring[ptr++] = reg_offset; - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); - } - ring->ring[ptr++] = mask; - - //9th to 21st: insert no-op - for (i = 0; i <= 12; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ring->ring[ptr++] = 0; - } - - //22nd: reset mmUVD_JRBC_RB_RPTR - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR); - reg_offset = (reg << 2); - val = 0; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x12; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); -} - static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -2123,9 +1724,6 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, case 120: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case 126: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2174,6 +1772,105 @@ static int vcn_v1_0_set_powergating_state(void *handle, return ret; } +static void vcn_v1_0_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, vcn.idle_work.work); + unsigned int fences = 0, i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + + if (fences) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + adev->vcn.pause_dpg_mode(adev, 0, &new_state); + } + + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); + + if (fences == 0) { + amdgpu_gfx_off_ctrl(adev, true); + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + } else { + schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); + } +} + +static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); + + mutex_lock(&adev->vcn.vcn1_jpeg1_workaround); + + if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec)) + DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n"); + + vcn_v1_0_set_pg_for_begin_use(ring, set_clocks); + +} + +void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks) +{ + struct amdgpu_device *adev = ring->adev; + + if (set_clocks) { + amdgpu_gfx_off_ctrl(adev, false); + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + unsigned int fences = 0, i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); + + if (fences) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + 
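	/*
	 * (A reading of the surrounding logic, not an added patch line:
	 * new_state.fw_based above is derived from the outstanding encode
	 * fences; the jpeg field is derived the same way just below, and
	 * the ring about to be used is then forced to VCN_DPG_STATE__PAUSE
	 * regardless of fence counts, because a submission is imminent.)
	 */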
+ if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + + adev->vcn.pause_dpg_mode(adev, 0, &new_state); + } +} + +void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); + mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround); +} + static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", .early_init = vcn_v1_0_early_init, @@ -2220,8 +1917,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .insert_start = vcn_v1_0_dec_ring_insert_start, .insert_end = vcn_v1_0_dec_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, + .begin_use = vcn_v1_0_ring_begin_use, + .end_use = vcn_v1_0_ring_end_use, .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, @@ -2252,48 +1949,13 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .insert_nop = amdgpu_ring_insert_nop, .insert_end = vcn_v1_0_enc_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, + .begin_use = vcn_v1_0_ring_begin_use, + .end_use = vcn_v1_0_ring_end_use, .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), - .support_64bit_ptrs = false, - .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, - .extra_dw = 64, - .get_rptr = vcn_v1_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v1_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v1_0_jpeg_ring_set_wptr, - .emit_frame_size = - 6 + 6 + /* hdp invalidate / flush */ - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */ - 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */ - 6, - .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v1_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v1_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v1_0_jpeg_ring_nop, - .insert_start = vcn_v1_0_jpeg_ring_insert_start, - .insert_end = vcn_v1_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; @@ -2310,12 +1972,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = 
&vcn_v1_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { .set = vcn_v1_0_set_interrupt_state, .process = vcn_v1_0_process_interrupt, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h index 2a497a7a4840..1f1cc7f0ece7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h @@ -24,6 +24,9 @@ #ifndef __VCN_V1_0_H__ #define __VCN_V1_0_H__ +void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring); +void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks); + extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 38f787a560cb..313fc1b53999 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -29,6 +30,8 @@ #include "soc15d.h" #include "amdgpu_pm.h" #include "amdgpu_psp.h" +#include "mmsch_v2_0.h" +#include "vcn_v2_0.h" #include "vcn/vcn_2_0_0_offset.h" #include "vcn/vcn_2_0_0_sh_mask.h" @@ -43,44 +46,18 @@ #define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x1e1 -#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x5a6 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x5a6 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2 -#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff -#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 -#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a -#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb -#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf -#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 -#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed -#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 -#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 -#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 - -#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b -#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 -#define mmUVD_REG_XX_MASK 0x026c -#define mmUVD_REG_XX_MASK_BASE_IDX 1 - static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); - + int inst_idx, struct dpg_pause_state *new_state); +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers * @@ -92,12 +69,13 @@ static int vcn_v2_0_early_init(void *handle) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (amdgpu_sriov_vf(adev)) + adev->vcn.num_enc_rings = 1; + else + adev->vcn.num_enc_rings = 2; vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); - vcn_v2_0_set_jpeg_ring_funcs(adev); vcn_v2_0_set_irq_funcs(adev); return 0; @@ -115,6 +93,7 @@ static int vcn_v2_0_sw_init(void *handle) struct amdgpu_ring *ring; int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, @@ -132,25 +111,11 @@ static int vcn_v2_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - DRM_INFO("PSP loading VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -162,7 +127,8 @@ static int vcn_v2_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; @@ -185,28 +151,29 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + else + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, + hw_prio, NULL); if (r) return r; } - ring = &adev->vcn.inst->ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + fw_shared = adev->vcn.inst->fw_shared_cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG); return 0; } @@ -219,8 +186,16 @@ static int vcn_v2_0_sw_init(void *handle) */ static int vcn_v2_0_sw_fini(void *handle) { - int r; + int r, idx; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + 
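/*
 * The fw_shared block wired up at the end of sw_init above is a small
 * driver/firmware handshake area: the driver advertises optional
 * features through a little-endian flags word and clears it again on
 * teardown. Struct layout and bit value below are illustrative, not
 * the real amdgpu_fw_shared ABI.
 */
#include <stdint.h>

#define TOY_MULTI_QUEUE_FLAG	(1u << 2)	/* hypothetical bit */

struct toy_fw_shared {
	uint32_t present_flag_0;	/* stored little-endian */
};

static uint32_t toy_cpu_to_le32(uint32_t v)
{
	const union { uint8_t b[4]; uint32_t w; } probe = { .b = { 1, 0, 0, 0 } };

	return probe.w == 1 ? v : __builtin_bswap32(v);
}

static void toy_advertise_features(volatile struct toy_fw_shared *fw_shared)
{
	fw_shared->present_flag_0 = toy_cpu_to_le32(TOY_MULTI_QUEUE_FLAG);
}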
fw_shared->present_flag_0 = 0; + drm_dev_exit(idx); + } + + amdgpu_virt_free_mm_table(adev); r = amdgpu_vcn_suspend(adev); if (r) @@ -247,10 +222,17 @@ static int vcn_v2_0_hw_init(void *handle) adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); + if (amdgpu_sriov_vf(adev)) + vcn_v2_0_start_sriov(adev); + r = amdgpu_ring_test_helper(ring); if (r) goto done; + //Disable vcn decode for sriov + if (amdgpu_sriov_vf(adev)) + ring->sched.ready = false; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; r = amdgpu_ring_test_helper(ring); @@ -258,11 +240,6 @@ static int vcn_v2_0_hw_init(void *handle) goto done; } - ring = &adev->vcn.inst->ring_jpeg; - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; - done: if (!r) DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", @@ -281,24 +258,14 @@ done: static int vcn_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; - int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, 0, mmUVD_STATUS))) vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); - ring->sched.ready = false; - - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = false; - } - - ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = false; - return 0; } @@ -356,6 +323,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + if (amdgpu_sriov_vf(adev)) + return; + /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -392,8 +362,16 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + /* non-cache window */ + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); + WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect) @@ -404,88 +382,91 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } 
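/*
 * Sketch of the drm_dev_enter()/drm_dev_exit() guard used in sw_fini
 * above: shared/device memory is only touched while the hot-unplug
 * protection section is held, and skipped once the device is gone.
 * The counter below is a toy stand-in for the SRCU read lock behind
 * the real primitives in <drm/drm_drv.h>.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_dev {
	bool unplugged;
	int readers;
};

static bool toy_dev_enter(struct toy_dev *d, int *idx)
{
	if (d->unplugged)
		return false;		/* device gone: skip the writes */
	*idx = ++d->readers;		/* stand-in for srcu_read_lock() */
	return true;
}

static void toy_dev_exit(struct toy_dev *d, int idx)
{
	(void)idx;
	d->readers--;			/* stand-in for srcu_read_unlock() */
}

static void toy_sw_fini(struct toy_dev *d, volatile uint32_t *present_flag_0)
{
	int idx;

	if (toy_dev_enter(d, &idx)) {
		*present_flag_0 = 0;	/* mirrors fw_shared->present_flag_0 = 0 */
		toy_dev_exit(d, idx);
	}
}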
else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); offset = size; - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( 
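/*
 * Every WREG32_SOC15_DPG_MODE(inst, offset, value, mask_en, indirect)
 * call rewritten in this hunk follows one dispatch: in direct mode the
 * register is written immediately, in indirect mode the (offset, value)
 * pair is appended to a command image in SRAM that PSP later replays
 * via psp_update_vcn_sram(). A simplified model of that dispatch; the
 * toy_* names are hypothetical.
 */
#include <stdint.h>

struct toy_dpg_ctx {
	uint32_t *sram_cur;	/* like adev->vcn.inst->dpg_sram_curr_addr */
	void (*mmio_write)(uint32_t offset, uint32_t value);
};

static void toy_wreg_dpg_mode(struct toy_dpg_ctx *ctx, uint32_t offset,
			      uint32_t value, int indirect)
{
	if (!indirect) {
		ctx->mmio_write(offset, value);
		return;
	}
	/* queue the pair; the whole image is flushed to PSP in one go */
	*ctx->sram_cur++ = offset;
	*ctx->sram_cur++ = value;
}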
UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( - UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } @@ -493,7 +474,6 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec * vcn_v2_0_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -501,6 +481,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -631,150 +614,26 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__SCPU_MODE_MASK); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } /** - * jpeg_v2_0_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_0_start(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg; - uint32_t tmp; - int r = 0; - - /* disable power gating */ - tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, - mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); - return r; - } - - /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; - 
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JPEG_ENC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - return 0; -} - -/** - * jpeg_v2_0_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_0_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int r = 0; - - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable JPEG CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JPEG_ENC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable power gating */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)); - tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; - tmp |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, - (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); - return r; - } - - return r; -} - -/** * vcn_v2_0_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ @@ -782,6 +641,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; + if (amdgpu_sriov_vf(adev)) + return; + /* enable UVD CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -832,7 +694,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev) static 
void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; - int ret; + + if (amdgpu_sriov_vf(adev)) + return; if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT @@ -848,7 +712,7 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF, ret); + UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF); } else { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT @@ -861,7 +725,7 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT); WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF); } /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, @@ -879,7 +743,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; - int ret; + + if (amdgpu_sriov_vf(adev)) + return; if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */ @@ -912,12 +778,13 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT); - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF); } } static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; @@ -930,7 +797,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp); if (indirect) - adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr; + adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr; /* enable clock gating */ vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect); @@ -939,11 +806,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interupt */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); /* setup mmUVD_LMI_CTRL */ @@ -955,28 +822,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MPC_CNTL), 0x2 << 
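/*
 * Note the "- int ret;" removals in the power-gating helpers around
 * here: SOC15_WAIT_ON_RREG changes from a macro that stored its result
 * in a trailing output argument to one that evaluates to the result, so
 * call sites either ignore it or write `r = SOC15_WAIT_ON_RREG(...)`.
 * A plain-function model of the masked poll it performs (the real macro
 * also udelay()s between reads):
 */
#include <stdint.h>
#include <errno.h>

static int toy_wait_on_reg(uint32_t (*read_reg)(void),
			   uint32_t expect, uint32_t mask,
			   unsigned int tries)
{
	while (tries--) {
		if ((read_reg() & mask) == expect)
			return 0;	/* field reached the expected value */
	}
	return -ETIMEDOUT;		/* caller may ignore or propagate */
}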
UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | @@ -984,29 +851,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) vcn_v2_0_mc_resume_dpg_mode(adev, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); /* release VCPU reset to boot */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect); /* enable LMI MC and UMC channels */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_LMI_CTRL2), 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect); /* enable master interrupt */ - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( UVD, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) - psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr, - (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr - - (uintptr_t)adev->vcn.dpg_sram_cpu_addr)); + psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr)); /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); @@ -1017,6 +884,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + /* set the write pointer delay */ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); @@ -1024,7 +897,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); - /* programm the RB_BASE for ring buffer */ + /* program the RB_BASE for ring buffer */ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -1039,11 +912,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 
lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); return 0; } static int vcn_v2_0_start(struct amdgpu_device *adev) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; @@ -1052,12 +930,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); - if (r) - return r; - goto jpeg; - } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); vcn_v2_0_disable_static_power_gating(adev); @@ -1182,7 +1056,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); - /* programm the RB_BASE for ring buffer */ + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + /* program the RB_BASE for ring buffer */ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, @@ -1194,51 +1069,49 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; -jpeg: - r = jpeg_v2_0_start(adev); - - return r; + return 0; } static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev) { - int ret_code = 0; uint32_t tmp; /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); - - tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, 
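/*
 * Both start paths above now wrap each ring's (re)programming in the
 * same bracket: raise FW_QUEUE_RING_RESET in that queue's mode word so
 * the firmware treats the ring pointers as invalid, rewrite the RB
 * base/size/RPTR/WPTR registers, then clear the flag (the DPG path
 * additionally stalls power-up around the whole sequence). Condensed
 * into one hypothetical helper:
 */
#include <stdint.h>

#define TOY_FW_QUEUE_RING_RESET	(1u << 0)	/* illustrative bit */

struct toy_queue {
	volatile uint32_t *mode;	/* e.g. &fw_shared->multi_queue.decode_queue_mode */
};

static void toy_reprogram_ring(struct toy_queue *q,
			       void (*stall_dpg)(int on),
			       void (*program_rb_regs)(void))
{
	if (stall_dpg)
		stall_dpg(1);		/* UVD_POWER_STATUS__STALL_DPG_POWER_UP */
	*q->mode |= TOY_FW_QUEUE_RING_RESET;

	program_rb_regs();		/* RB_BASE_LO/HI, RB_SIZE, RPTR, WPTR */

	*q->mode &= ~TOY_FW_QUEUE_RING_RESET;
	if (stall_dpg)
		stall_dpg(0);
}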
tmp, 0xFFFFFFFF); tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, @@ -1252,10 +1125,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) uint32_t tmp; int r; - r = jpeg_v2_0_stop(adev); - if (r) - return r; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v2_0_stop_dpg_mode(adev); if (r) @@ -1264,7 +1133,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) } /* wait for uvd idle */ - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); + r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); if (r) return r; @@ -1272,7 +1141,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) UVD_LMI_STATUS__READ_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); + r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp); if (r) return r; @@ -1283,7 +1152,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); + r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp); if (r) return r; @@ -1320,25 +1189,25 @@ power_off: } static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, - struct dpg_pause_state *new_state) + int inst_idx, struct dpg_pause_state *new_state) { struct amdgpu_ring *ring; uint32_t reg_data = 0; int ret_code; /* pause/unpause if state is changed */ - if (adev->vcn.pause_state.fw_based != new_state->fw_based) { + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { DRM_DEBUG("dpg pause state changed %d -> %d", - adev->vcn.pause_state.fw_based, new_state->fw_based); + adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { - ret_code = 0; - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); if (!ret_code) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; /* pause DPG */ reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); @@ -1346,36 +1215,51 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, /* wait for ACK */ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, - UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* Restore */ + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, 
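/*
 * Sketch of the request/ACK handshake vcn_v2_0_pause_dpg_mode() now
 * performs per instance (note the new inst_idx parameter and the
 * per-instance pause_state cache): act only when the requested state
 * differs, write the pause request bit, poll for the firmware ACK,
 * re-program the encode rings, then record the new state. Register
 * access is stubbed out; names are hypothetical.
 */
#include <errno.h>

enum toy_pause_state { TOY_UNPAUSE, TOY_PAUSE };

struct toy_vcn_inst {
	enum toy_pause_state pause_state;	/* cached last request */
};

static int toy_pause_dpg(struct toy_vcn_inst *inst,
			 enum toy_pause_state new_state,
			 void (*write_pause_req)(int on),
			 int (*wait_for_ack)(void),
			 void (*restore_enc_rings)(void))
{
	if (inst->pause_state == new_state)
		return 0;			/* nothing to change */

	if (new_state == TOY_PAUSE) {
		write_pause_req(1);		/* NJ_PAUSE_DPG_REQ */
		if (wait_for_ack())		/* poll NJ_PAUSE_DPG_ACK */
			return -ETIMEDOUT;
		restore_enc_rings();		/* wptr = 0, re-write RB regs */
	} else {
		write_pause_req(0);		/* unpause: no ACK wait needed */
	}

	inst->pause_state = new_state;
	return 0;
}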
mmUVD_RB_SIZE, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst->ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } } else { /* unpause dpg, no need to wait */ reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); } - adev->vcn.pause_state.fw_based = new_state->fw_based; + adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; } return 0; @@ -1391,10 +1275,10 @@ static bool vcn_v2_0_is_idle(void *handle) static int vcn_v2_0_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; + int ret; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE, ret); + ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); return ret; } @@ -1403,11 +1287,14 @@ static int vcn_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); + + if (amdgpu_sriov_vf(adev)) + return 0; if (enable) { /* wait for STATUS to clear */ - if (vcn_v2_0_is_idle(handle)) + if (!vcn_v2_0_is_idle(handle)) return -EBUSY; vcn_v2_0_enable_clock_gating(adev); } else { @@ -1507,6 +1394,7 @@ void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) * vcn_v2_0_dec_ring_insert_nop - insert a nop command * * @ring: amdgpu_ring pointer + * @count: the number of NOP packets to insert * * Write a nop command to the ring. */ @@ -1527,7 +1415,9 @@ void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * vcn_v2_0_dec_ring_emit_fence - emit an fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write a fence and a trap command to the ring. 
*/ @@ -1564,7 +1454,9 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrieve vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write ring commands to execute the indirect buffer */ @@ -1615,7 +1507,7 @@ void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; data1 = lower_32_bits(pd_addr); mask = 0xffffffff; vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); @@ -1710,7 +1602,9 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring) * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command * * @ring: amdgpu_ring pointer - * @fence: fence to emit + * @addr: address + * @seq: sequence number + * @flags: fence related flags * * Write enc a fence and a trap command to the ring. */ @@ -1735,7 +1629,9 @@ void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring) * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer * * @ring: amdgpu_ring pointer + * @job: job to retrive vmid from * @ib: indirect buffer to execute + * @flags: unused * * Write enc ring commands to execute the indirect buffer */ @@ -1770,7 +1666,8 @@ void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); /* wait for reg writes */ - vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2, + vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + + vmid * hub->ctx_addr_distance, lower_32_bits(pd_addr), 0xffffffff); } @@ -1781,272 +1678,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_ amdgpu_ring_write(ring, val); } -/** - * vcn_v2_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; - else - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - -/** - * vcn_v2_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring. 
- */ -void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v2_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. - */ -void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect buffer. 
- */ -void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) -{ - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -2071,9 +1702,6 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2083,13 +1711,16 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 4); if (r) @@ -2126,6 +1757,11 @@ static int vcn_v2_0_set_powergating_state(void *handle, int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_vf(adev)) { + adev->vcn.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->vcn.cur_state) return 0; @@ -2139,6 +1775,211 @@ static int vcn_v2_0_set_powergating_state(void *handle, return ret; } +static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop; + uint64_t addr = table->gpu_addr; + struct mmsch_v2_0_init_header *header; + uint32_t size; + int i; + + header = (struct mmsch_v2_0_init_header *)table->cpu_addr; + size = header->header_size + header->vcn_table_size; + + /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + adev->vcn.inst->ring_dec.wptr = 0; + adev->vcn.inst->ring_dec.wptr_old = 0; + vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec); + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + adev->vcn.inst->ring_enc[i].wptr = 0; + adev->vcn.inst->ring_enc[i].wptr_old = 0; + vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]); + } + + /* 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 1000; + while ((data & 0x10000002) != 0x10000002) { + udelay(10); + data = RREG32_SOC15(UVD, 0, 
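/*
 * The mailbox poll at the tail of vcn_v2_0_start_mmsch() (continuing
 * below) is a doorbell-plus-poll: ring the host mailbox, then spin with
 * a small delay and a bounded retry count until the response register
 * carries the "done" pattern. A self-contained model of that loop
 * (pattern value copied from the code; helpers hypothetical):
 */
#include <stdint.h>
#include <errno.h>

#define TOY_MMSCH_DONE	0x10000002u	/* expected MAILBOX_RESP pattern */

static int toy_wait_mmsch(uint32_t (*read_resp)(void),
			  void (*delay_us)(unsigned int us))
{
	unsigned int loop = 1000;

	while ((read_resp() & TOY_MMSCH_DONE) != TOY_MMSCH_DONE) {
		if (--loop == 0)
			return -EBUSY;	/* "failed to init MMSCH" */
		delay_us(10);
	}
	return 0;
}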
mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + DRM_ERROR("failed to init MMSCH, " \ + "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) +{ + int r; + uint32_t tmp; + struct amdgpu_ring *ring; + uint32_t offset, size; + uint32_t table_size = 0; + struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; + struct mmsch_v2_0_cmd_end end = { {0} }; + struct mmsch_v2_0_init_header *header; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + uint8_t i = 0; + + header = (struct mmsch_v2_0_init_header *)init_table; + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) { + header->version = MMSCH_VERSION; + header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2; + + header->vcn_table_offset = header->header_size; + + init_table += header->vcn_table_offset; + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi); + offset = 0; + } else { + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr)); + offset = size; + } + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + size); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + for (r = 0; r < adev->vcn.num_enc_rings; ++r) { + ring = &adev->vcn.inst->ring_enc[r]; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + 
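/*
 * vcn_v2_0_start_sriov(), in progress here, never touches the VCN
 * registers directly; each MMSCH_V2_0_INSERT_DIRECT_WT appends a
 * {command, register, value} record to a shared table that the MMSCH
 * microcontroller replays on behalf of the VF. A simplified encoder
 * for such records -- the layout below is illustrative, not the real
 * mmsch_v2_0 ABI:
 */
#include <stdint.h>
#include <string.h>

enum toy_mmsch_cmd {
	TOY_MMSCH_DIRECT_WRITE = 1,	/* hypothetical encoding */
	TOY_MMSCH_END = 0xf,
};

struct toy_mmsch_entry {
	uint32_t cmd;
	uint32_t reg;
	uint32_t value;
};

/* append one direct-write record, return the new table cursor */
static uint32_t *toy_insert_direct_wt(uint32_t *table,
				      uint32_t reg, uint32_t value)
{
	struct toy_mmsch_entry e = {
		.cmd = TOY_MMSCH_DIRECT_WRITE, .reg = reg, .value = value,
	};

	memcpy(table, &e, sizeof(e));
	return table + sizeof(e) / sizeof(*table);
}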
lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + } + + ring = &adev->vcn.inst->ring_dec; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + /* force RBC into idle state */ + tmp = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + tmp = sizeof(struct mmsch_v2_0_cmd_end); + memcpy((void *)init_table, &end, tmp); + table_size += (tmp / 4); + header->vcn_table_size = table_size; + + } + return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); +} + static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, @@ -2219,36 +2060,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, - .get_rptr = vcn_v2_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_0_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs; @@ -2265,12 +2076,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { .set = vcn_v2_0_set_interrupt_state, .process = vcn_v2_0_process_interrupt, @@ -2278,7 +2083,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.inst->irq.num_types = 
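/*
 * The UVD_RBC_RB_CNTL value composed in the SR-IOV table above is built
 * with REG_SET_FIELD, which clears a named bitfield and ORs in a new
 * value via the generated __SHIFT/_MASK constants. The same operation
 * written out by hand; the mask/shift values below are illustrative:
 */
#include <stdint.h>

#define TOY_RB_BUFSZ_SHIFT	0
#define TOY_RB_BUFSZ_MASK	0x0000001fu

static uint32_t toy_set_field(uint32_t reg, uint32_t mask,
			      unsigned int shift, uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

/* usage: tmp = toy_set_field(0, TOY_RB_BUFSZ_MASK, TOY_RB_BUFSZ_SHIFT, bufsz); */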
adev->vcn.num_enc_rings + 2; + adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index 8467292f32e5..6c9de1882428 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, @@ -49,19 +50,6 @@ extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr); extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags); -extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_ib *ib, uint32_t flags); -extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask); -extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr); -extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count); - extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block; #endif /* __VCN_V2_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 93edf9193a7b..44fc4c218433 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -29,6 +30,7 @@ #include "soc15.h" #include "soc15d.h" #include "vcn_v2_0.h" +#include "mmsch_v1_0.h" #include "vcn/vcn_2_5_offset.h" #include "vcn/vcn_2_5_sh_mask.h" @@ -44,19 +46,19 @@ #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4 -#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 +#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state); +static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); +static int vcn_v2_5_sriov_start(struct amdgpu_device *adev); static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, @@ -73,29 +75,30 @@ static int amdgpu_ih_clientid_vcns[] = { static int vcn_v2_5_early_init(void *handle) { 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) { + + if (amdgpu_sriov_vf(adev)) { + adev->vcn.num_vcn_inst = 2; + adev->vcn.harvest_config = 0; + adev->vcn.num_enc_rings = 1; + } else { u32 harvest; int i; - adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); + harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) adev->vcn.harvest_config |= 1 << i; } - if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | - AMDGPU_VCN_HARVEST_VCN1)) + AMDGPU_VCN_HARVEST_VCN1)) /* both instances are harvested, disable the block */ return -ENOENT; - } else - adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + adev->vcn.num_enc_rings = 2; + } vcn_v2_5_set_dec_ring_funcs(adev); vcn_v2_5_set_enc_ring_funcs(adev); - vcn_v2_5_set_jpeg_ring_funcs(adev); vcn_v2_5_set_irq_funcs(adev); return 0; @@ -130,40 +133,21 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; } - - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq); - if (r) - return r; } r = amdgpu_vcn_sw_init(adev); if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) { - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - } - DRM_INFO("PSP loading VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) return r; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + volatile struct amdgpu_fw_shared *fw_shared; + if (adev->vcn.harvest_config & (1 << j)) continue; adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; @@ -174,46 +158,57 @@ static int vcn_v2_5_sw_init(void *handle) adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; - adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9); + adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9); adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; - adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0); adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; - adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1); adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; - adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD); adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; - adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP); - - adev->vcn.internal.jpeg_pitch 
= mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH); + adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP); ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; + + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); sprintf(ring->name, "vcn_dec_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, + 0, AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) return r; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst[j].ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j; + + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); + sprintf(ring->name, "vcn_enc_%d.%d", j, i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + r = amdgpu_ring_init(adev, ring, 512, + &adev->vcn.inst[j].irq, 0, + hw_prio, NULL); if (r) return r; } - ring = &adev->vcn.inst[j].ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j; - sprintf(ring->name, "vcn_jpeg_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG); + } + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); if (r) return r; } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode; + return 0; } @@ -226,8 +221,23 @@ static int vcn_v2_5_sw_init(void *handle) */ static int vcn_v2_5_sw_fini(void *handle) { - int r; + int i, r, idx; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + volatile struct amdgpu_fw_shared *fw_shared; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + } + drm_dev_exit(idx); + } + + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); r = amdgpu_vcn_suspend(adev); if (r) @@ -249,35 +259,44 @@ static int vcn_v2_5_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r = 0; + + if (amdgpu_sriov_vf(adev)) + r = vcn_v2_5_sriov_start(adev); for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; - ring = &adev->vcn.inst[j].ring_dec; - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index, j); + if (amdgpu_sriov_vf(adev)) { + adev->vcn.inst[j].ring_enc[0].sched.ready = true; + adev->vcn.inst[j].ring_enc[1].sched.ready = false; + adev->vcn.inst[j].ring_enc[2].sched.ready = false; + adev->vcn.inst[j].ring_dec.sched.ready = true; + } else { - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; + ring = &adev->vcn.inst[j].ring_dec; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index, j); - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[j].ring_enc[i]; r = 
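/*
 * Doorbell layout implied by the index arithmetic above, with
 * base = adev->doorbell_index.vcn.vcn_ring0_1 << 1:
 *
 *   bare metal: eight doorbells reserved per instance j
 *     base + 8*j          -> dec ring
 *     base + 8*j + 2 + i  -> enc ring i  (slot +1 belonged to the
 *                            JPEG ring this patch removes)
 *   SR-IOV: two doorbells per instance j
 *     base + 2*j          -> dec ring
 *     base + 2*j + 1      -> enc ring 0
 */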
amdgpu_ring_test_helper(ring); if (r) goto done; - } - ring = &adev->vcn.inst[j].ring_jpeg; - r = amdgpu_ring_test_helper(ring); - if (r) - goto done; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.inst[j].ring_enc[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } + } } + done: if (!r) - DRM_INFO("VCN decode and encode initialized successfully.\n"); + DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); return r; } @@ -292,26 +311,18 @@ done: static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; - int i, j; + int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; - if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); - - ring->sched.ready = false; - - for (j = 0; j < adev->vcn.num_enc_rings; ++j) { - ring = &adev->vcn.inst[i].ring_enc[j]; - ring->sched.ready = false; - } - - ring = &adev->vcn.inst[i].ring_jpeg; - ring->sched.ready = false; } return 0; @@ -377,41 +388,146 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) continue; /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[i].gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr)); offset = size; - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size); /* cache window 1: stack */ - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, 
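/*
 * The three VCPU cache windows programmed in mc_resume carve one buffer
 * object into consecutive regions:
 *
 *   window 0: firmware image (offset 0 when the PSP loads the fw from
 *             the TMR, else AMDGPU_UVD_FIRMWARE_OFFSET into the VCN BO)
 *   window 1: stack, AMDGPU_VCN_STACK_SIZE bytes after the firmware
 *   window 2: context, AMDGPU_VCN_CONTEXT_SIZE bytes after the stack
 *
 * Each BAR_LOW/BAR_HIGH pair is therefore gpu_addr plus a running
 * offset, split with lower_32_bits()/upper_32_bits().
 */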
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); } } +static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + 
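/*
 * WREG32_SOC15_DPG_MODE() has two personalities: with indirect == 0 the
 * register is written immediately, with indirect == 1 the (offset, value)
 * pair is appended to the instance's DPG SRAM staging buffer, which
 * psp_update_vcn_sram() later hands to the PSP to apply. Conceptually (a
 * sketch, not the macro's exact body):
 *
 *   if (!indirect)
 *           WREG32(offset, value);
 *   else {
 *           *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset;
 *           *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;
 *   }
 */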
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + /** * vcn_v2_5_disable_clock_gating - disable VCN clock gating * @@ -422,7 +538,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; - int ret = 0; int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { @@ -462,7 +577,7 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data); - SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret); + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF); data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK @@ -530,6 +645,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) } } +static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev, + uint8_t sram_sel, int inst_idx, uint8_t indirect) +{ + uint32_t reg_data = 0; + + /* enable sw clock gating control */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + 
UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + + /* turn off clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); + + /* turn on SUVD clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + + /* turn on sw mode in UVD_SUVD_CGC_CTRL */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); +} + /** * vcn_v2_5_enable_clock_gating - enable VCN clock gating * @@ -592,111 +755,146 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) } } -/** - * jpeg_v2_5_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_5_start(struct amdgpu_device *adev) +static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; struct amdgpu_ring *ring; - uint32_t tmp; - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ring = &adev->vcn.inst[i].ring_jpeg; - /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK - | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK - | JPEG_CGC_CTRL__JMCIF_MODE_MASK - | JPEG_CGC_CTRL__JRBBM_MODE_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - /* MJPEG global tiling registers */ - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR); - } - 
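/*
 * All of the JPEG plumbing deleted from this file (jpeg_v2_5_start/stop,
 * the JPEG ring funcs and the SRCID__JPEG_DECODE interrupt handling)
 * moves to a standalone JPEG IP block, which is also why the
 * per-instance irq.num_types drops from num_enc_rings + 2 to
 * num_enc_rings + 1 later in the patch: one interrupt source for the dec
 * ring plus one per enc ring, with JPEG accounted for elsewhere.
 */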
- return 0; -} - -/** - * jpeg_v2_5_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_5_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int i; + uint32_t rb_bufsz, tmp; - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - } + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp); + + if (indirect) + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + + /* enable clock gating */ + vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect); + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect); + + /* setup mmUVD_LMI_CTRL */ + tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MPC_CNTL), + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MPC_SET_MUXA0), + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MPC_SET_MUXB0), + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MPC_SET_MUX), + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); + + vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, 
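/*
 * Ordering in this DPG bring-up matters; the sequence the function
 * follows is:
 *
 *   1. release the register anti-hang latch, set UVD_PG_MODE/UVD_PG_EN
 *   2. if indirect, rewind dpg_sram_curr_addr so staging starts fresh
 *   3. program clock gating, the VCPU clock (BLK_RST held), LMI and MPC
 *   4. run mc_resume_dpg_mode() to aim the cache windows at the fw BO
 *   5. drop BLK_RST and enable the master interrupt
 *   6. if indirect, psp_update_vcn_sram() flushes the staged writes
 *   7. only then program the RBC ring registers, bracketed by
 *      STALL_DPG_POWER_UP and the FW_QUEUE_RING_RESET shared-memory flag
 */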
SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + + /* enable LMI MC and UMC channels */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); + + /* unblock VCPU register access */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, 0, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + + ring = &adev->vcn.inst[inst_idx].ring_dec; + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + + /* set the write pointer delay */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0); + + WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0); + + ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); return 0; } @@ -713,15 +911,23 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } + /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* set uvd status busy */ - tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); + tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp); } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return 0; + /*SW clock gating */ 
vcn_v2_5_disable_clock_gating(adev); @@ -729,44 +935,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* setup mmUVD_LMI_CTRL */ - tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL); + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL); tmp &= ~0xff; - WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8| + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8| UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); /* setup mmUVD_MPC_CNTL */ - tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL); + tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL); tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp); /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0, ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0, ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); /* setup mmUVD_MPC_SET_MUX */ - WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX, + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX, ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); @@ -775,30 +981,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) vcn_v2_5_mc_resume(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; if (adev->vcn.harvest_config & (1 << i)) continue; /* VCN global tiling registers */ - WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG, adev->gfx.config.gb_addr_config); /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* unblock VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0, ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~UVD_VCPU_CNTL__BLK_RST_MASK); for (k = 0; k < 10; ++k) { uint32_t status; for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, i, mmUVD_STATUS); + status = RREG32_SOC15(VCN, i, mmUVD_STATUS); if (status & 2) break; if (amdgpu_emu_mode == 1) @@ -811,11 +1018,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) break; DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + 
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__BLK_RST_MASK, ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); @@ -828,15 +1035,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) } /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0); + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0); ring = &adev->vcn.inst[i].ring_dec; /* force RBC into idle state */ @@ -846,53 +1053,284 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp); - /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET; + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0); - ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR, + ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); - ring = &adev->vcn.inst[i].ring_enc[0]; - WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET; + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; ring = &adev->vcn.inst[i].ring_enc[1]; - WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, 
lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; } - r = jpeg_v2_5_start(adev); - return r; + return 0; } -static int vcn_v2_5_stop(struct amdgpu_device *adev) +static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop = 0, size = 0; + uint64_t addr = table->gpu_addr; + struct mmsch_v1_1_init_header *header = NULL; + + header = (struct mmsch_v1_1_init_header *)table->cpu_addr; + size = header->total_size; + + /* + * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of + * memory descriptor location + */ + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + /* + * 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 10; + while ((data & 0x10000002) != 0x10000002) { + udelay(100); + data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + dev_err(adev->dev, + "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n", + data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + uint32_t offset, size, tmp, i, rb_bufsz; + uint32_t table_size = 0; + struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } }; + struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } }; + struct mmsch_v1_0_cmd_end end = { { 0 } }; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table; + + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + header->version = MMSCH_VERSION; + header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2; + init_table += header->total_size; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + header->eng[i].table_offset = header->total_size; + header->eng[i].init_status = 0; + header->eng[i].table_size = 0; + + table_size = 0; + + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + MMSCH_V1_0_INSERT_DIRECT_WT( + 
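/*
 * Unlike the single-engine v2.0 header used by vcn_v2_0_start_sriov(),
 * the mmsch_v1_1_init_header carries a per-engine directory, so one
 * descriptor covers both Arcturus VCN instances:
 *
 *   header->total_size           running size of the table, in dwords
 *   header->eng[i].table_offset  where instance i's commands begin
 *   header->eng[i].table_size    how many dwords instance i consumed
 *
 * which is why each loop iteration resets table_size, appends the
 * register writes plus an END packet, then folds table_size back into
 * total_size before vcn_v2_5_mmsch_start() kicks off the replay.
 */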
SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + offset = 0; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0); + } else { + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = size; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0), + size); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO), + lower_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + + ring = &adev->vcn.inst[i].ring_dec; + ring->wptr = 0; + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V1_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); + table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; + init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4; + + /* refine header */ + header->eng[i].table_size = table_size; + header->total_size += table_size; + } + + return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table); +} + +static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { uint32_t tmp; - int i, r; - r = jpeg_v2_5_stop(adev); - if (r) - return r; + /* Wait for power 
status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + return 0; +} + +static int vcn_v2_5_stop(struct amdgpu_device *adev) +{ + uint32_t tmp; + int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_stop_dpg_mode(adev, i); + continue; + } + /* wait for vcn idle */ - SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); if (r) return r; @@ -900,7 +1338,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) UVD_LMI_STATUS__READ_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r); + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp); if (r) return r; @@ -911,22 +1349,22 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r); + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp); if (r) return r; /* block VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), UVD_RB_ARB_CTRL__VCPU_DIS_MASK, ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* reset VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__BLK_RST_MASK, ~UVD_VCPU_CNTL__BLK_RST_MASK); /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, ~(UVD_VCPU_CNTL__CLK_EN_MASK)); /* clear status */ @@ -935,7 +1373,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) vcn_v2_5_enable_clock_gating(adev); /* enable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } @@ -946,6 +1384,81 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) return 0; } +static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state) +{ + struct amdgpu_ring *ring; + uint32_t reg_data = 0; + int ret_code = 0; + + /* pause/unpause if state is changed */ + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { + DRM_DEBUG("dpg pause state changed %d -> %d", + adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); + reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { 
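/*
 * Pause protocol: wait for the power status to read "on", raise
 * NJ_PAUSE_DPG_REQ, wait for the matching ACK, then, with DPG power-up
 * stalled, rewrite both encode rings' base/size/rptr/wptr from scratch
 * (the firmware may have dropped them), and finally unstall. Each ring
 * rewrite is bracketed in the shared-memory flags so the firmware knows
 * the pointers are transiently invalid:
 *
 *   fw_shared->multi_queue.encode_generalpurpose_queue_mode |=
 *           FW_QUEUE_RING_RESET;
 *   ...reprogram RB_BASE_LO/HI, RB_SIZE, RB_RPTR, RB_WPTR...
 *   fw_shared->multi_queue.encode_generalpurpose_queue_mode &=
 *           ~FW_QUEUE_RING_RESET;
 */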
+ ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + if (!ret_code) { + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; + + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + /* Restore */ + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET; + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET; + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET; + ring = &adev->vcn.inst[inst_idx].ring_enc[1]; + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET; + + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + } + } else { + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + } + adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + /** * vcn_v2_5_dec_ring_get_rptr - get read pointer * @@ -957,7 +1470,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -974,7 +1487,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -992,7 +1505,7 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } } @@ -1013,7 +1526,37 @@ static const struct amdgpu_ring_funcs 
vcn_v2_5_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, + .test_ib = amdgpu_vcn_dec_ring_test_ib, + .insert_nop = vcn_v2_0_dec_ring_insert_nop, + .insert_start = vcn_v2_0_dec_ring_insert_start, + .insert_end = vcn_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_DEC, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = vcn_v2_5_dec_ring_get_rptr, + .get_wptr = vcn_v2_5_dec_ring_get_wptr, + .set_wptr = vcn_v2_5_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */ + 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */ + .emit_ib = vcn_v2_0_dec_ring_emit_ib, + .emit_fence = vcn_v2_0_dec_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, @@ -1038,9 +1581,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2); } /** @@ -1058,12 +1601,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR); } else { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2); } } @@ -1083,14 +1626,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); } } else { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } } } @@ -1125,84 +1668,34 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -/** - * vcn_v2_5_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = 
ring->adev; - - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_5_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; - else - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_5_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - -static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_1, - .get_rptr = vcn_v2_5_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_5_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_5_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = vcn_v2_5_enc_ring_get_rptr, + .get_wptr = vcn_v2_5_enc_ring_get_wptr, + .set_wptr = vcn_v2_5_enc_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_enc_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) @@ -1212,7 +1705,10 @@ static void vcn_v2_5_set_dec_ring_funcs(struct 
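/*
 * VCN 2.6 (Aldebaran) shares this whole file with VCN 2.5; the
 * ring-funcs tables are duplicated only so the two ASICs can diverge
 * later without disturbing the shared start/stop paths. Selection
 * happens per ring by IP discovery version:
 *
 *   if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
 *           ring->funcs = &vcn_v2_5_dec_ring_vm_funcs;
 *   else    // IP_VERSION(2, 6, 0), i.e. CHIP_ALDEBARAN
 *           ring->funcs = &vcn_v2_6_dec_ring_vm_funcs;
 */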
amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; + else /* CHIP_ALDEBARAN */ + adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs; adev->vcn.inst[i].ring_dec.me = i; DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i); } @@ -1226,26 +1722,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << j)) continue; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) + adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + else /* CHIP_ALDEBARAN */ + adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs; adev->vcn.inst[j].ring_enc[i].me = j; } DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j); } } -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; - adev->vcn.inst[i].ring_jpeg.me = i; - DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i); - } -} - static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1268,8 +1754,8 @@ static int vcn_v2_5_wait_for_idle(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE, ret); + ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); if (ret) return ret; } @@ -1281,10 +1767,13 @@ static int vcn_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); + + if (amdgpu_sriov_vf(adev)) + return 0; if (enable) { - if (vcn_v2_5_is_idle(handle)) + if (!vcn_v2_5_is_idle(handle)) return -EBUSY; vcn_v2_5_enable_clock_gating(adev); } else { @@ -1300,6 +1789,9 @@ static int vcn_v2_5_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret; + if (amdgpu_sriov_vf(adev)) + return 0; + if(state == adev->vcn.cur_state) return 0; @@ -1352,9 +1844,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -1376,7 +1865,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; } } @@ -1401,6 +1890,26 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { .set_powergating_state = vcn_v2_5_set_powergating_state, }; +static const struct amd_ip_funcs vcn_v2_6_ip_funcs = { + .name = "vcn_v2_6", + .early_init = vcn_v2_5_early_init, + .late_init = NULL, + .sw_init = vcn_v2_5_sw_init, + .sw_fini = vcn_v2_5_sw_fini, + .hw_init = vcn_v2_5_hw_init, + .hw_fini = vcn_v2_5_hw_fini, + .suspend = vcn_v2_5_suspend, + .resume = vcn_v2_5_resume, + .is_idle = vcn_v2_5_is_idle, + .wait_for_idle = vcn_v2_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v2_5_set_clockgating_state, + .set_powergating_state = vcn_v2_5_set_powergating_state, +}; + const struct amdgpu_ip_block_version vcn_v2_5_ip_block = { .type = AMD_IP_BLOCK_TYPE_VCN, @@ -1409,3 +1918,12 @@ const struct amdgpu_ip_block_version vcn_v2_5_ip_block = .rev = 0, .funcs = &vcn_v2_5_ip_funcs, }; + +const struct amdgpu_ip_block_version vcn_v2_6_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 2, + .minor = 6, + .rev = 0, + .funcs = &vcn_v2_6_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h index 8d9c0800b8e0..e72f799ed0fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h @@ -25,5 +25,6 @@ #define __VCN_V2_5_H__ extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block; +extern const struct amdgpu_ip_block_version vcn_v2_6_ip_block; #endif /* __VCN_V2_5_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c new file mode 100644 index 000000000000..da11ceba0698 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -0,0 +1,2277 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "vcn_v2_0.h" +#include "mmsch_v3_0.h" + +#include "vcn/vcn_3_0_0_offset.h" +#include "vcn/vcn_3_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#include <drm/drm_drv.h> + +#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 +#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f +#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10 +#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11 +#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29 +#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66 +#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d + +#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 +#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c + +#define VCN_INSTANCES_SIENNA_CICHLID 2 +#define DEC_SW_RING_ENABLED FALSE + +#define RDECODE_MSG_CREATE 0x00000000 +#define RDECODE_MESSAGE_CREATE 0x00000001 + +static int amdgpu_ih_clientid_vcns[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + +static int vcn_v3_0_start_sriov(struct amdgpu_device *adev); +static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev); +static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state); +static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state); + +static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring); +static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); + +/** + * vcn_v3_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int vcn_v3_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) { + adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; + adev->vcn.harvest_config = 0; + adev->vcn.num_enc_rings = 1; + + } else { + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; + + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33)) + 
adev->vcn.num_enc_rings = 0; + else + adev->vcn.num_enc_rings = 2; + } + + vcn_v3_0_set_dec_ring_funcs(adev); + vcn_v3_0_set_enc_ring_funcs(adev); + vcn_v3_0_set_irq_funcs(adev); + + return 0; +} + +/** + * vcn_v3_0_sw_init - sw init for VCN block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int vcn_v3_0_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int i, j, r; + int vcn_doorbell_index = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_sw_init(adev); + if (r) + return r; + + amdgpu_vcn_setup_ucode(adev); + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + /* + * Note: doorbell assignment is fixed for SRIOV multiple VCN engines + * Formula: + * vcn_db_base = adev->doorbell_index.vcn.vcn_ring0_1 << 1; + * dec_ring_i = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j + */ + if (amdgpu_sriov_vf(adev)) { + vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1; + /* get DWORD offset */ + vcn_doorbell_index = vcn_doorbell_index << 1; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_fw_shared *fw_shared; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + + adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; + adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; + adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; + adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; + + adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; + adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9); + adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; + adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; + adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; + adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; + adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP); + + /* VCN DEC TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq); + if (r) + return r; + + atomic_set(&adev->vcn.inst[i].sched_score, 0); + + ring = &adev->vcn.inst[i].ring_dec; + ring->use_doorbell = true; + if (amdgpu_sriov_vf(adev)) { + ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1); + } else { + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; + } + sprintf(ring->name, "vcn_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, + AMDGPU_RING_PRIO_DEFAULT, + &adev->vcn.inst[i].sched_score); + if (r) + return r; + + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j); + + /* VCN ENC TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], + j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); + if (r) + return r; + + ring = &adev->vcn.inst[i].ring_enc[j]; + ring->use_doorbell = true; + if 
(amdgpu_sriov_vf(adev)) { + ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j; + } else { + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; + } + sprintf(ring->name, "vcn_enc_%d.%d", i, j); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, + hw_prio, &adev->vcn.inst[i].sched_score); + if (r) + return r; + } + + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) | + cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) | + cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); + fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED); + } + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode; + + return 0; +} + +/** + * vcn_v3_0_sw_fini - sw fini for VCN block + * + * @handle: amdgpu_device pointer + * + * VCN suspend and free up sw allocation + */ +static int vcn_v3_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r, idx; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_fw_shared *fw_shared; + + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sw_ring.is_enabled = false; + } + + drm_dev_exit(idx); + } + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + + r = amdgpu_vcn_suspend(adev); + if (r) + return r; + + r = amdgpu_vcn_sw_fini(adev); + + return r; +} + +/** + * vcn_v3_0_hw_init - start and test VCN block + * + * @handle: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v3_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int i, j, r; + + if (amdgpu_sriov_vf(adev)) { + r = vcn_v3_0_start_sriov(adev); + if (r) + goto done; + + /* initialize VCN dec and enc ring buffers */ + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ring = &adev->vcn.inst[i].ring_dec; + if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) { + ring->sched.ready = false; + dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name); + } else { + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v3_0_dec_ring_set_wptr(ring); + ring->sched.ready = true; + } + + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; + if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) { + ring->sched.ready = false; + dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name); + } else { + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v3_0_enc_ring_set_wptr(ring); + ring->sched.ready = true; + } + } + } + } else { + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ring = &adev->vcn.inst[i].ring_dec; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; + r = amdgpu_ring_test_helper(ring); + if (r) + goto done; + } + } + } + +done: + if (!r) + DRM_INFO("VCN decode and encode initialized successfully (under %s).\n", +
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode"); + + return r; +} + +/** + * vcn_v3_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v3_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + cancel_delayed_work_sync(&adev->vcn.idle_work); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (!amdgpu_sriov_vf(adev)) { + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, i, mmUVD_STATUS))) { + vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + } + } + } + + return 0; +} + +/** + * vcn_v3_0_suspend - suspend VCN block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend VCN block + */ +static int vcn_v3_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vcn_v3_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_vcn_suspend(adev); + + return r; +} + +/** + * vcn_v3_0_resume - resume VCN block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init VCN block + */ +static int vcn_v3_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + r = vcn_v3_0_hw_init(adev); + + return r; +} + +/** + * vcn_v3_0_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Let the VCN memory controller know its offsets + */ +static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr)); + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr)); + offset = size; + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ +
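/*
 * [Editor's sketch, not part of the original patch] The cache windows
 * above carve one per-instance VCPU BO into consecutive regions; with
 * the sizes used in this function the layout works out to roughly:
 *
 *   uint32_t size  = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
 *   uint64_t base  = adev->vcn.inst[inst].gpu_addr;
 *   uint64_t stack = base + offset;                  // cache window 1
 *   uint64_t ctx   = stack + AMDGPU_VCN_STACK_SIZE;  // cache window 2
 *
 * where offset is 0 when PSP has already placed the firmware image in
 * the TMR (window 0 then points at the TMR copy) and `size` when the
 * driver loads the firmware itself. The non-cache window programmed
 * next points at the separate fw_shared buffer rather than this BO.
 */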
WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr)); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); +} + +static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + 
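/*
 * [Editor's aside, illustrative only] In this _dpg_mode variant,
 * WREG32_SOC15_DPG_MODE() with indirect set performs no MMIO at all:
 * it appends {offset, value} pairs to the instance's DPG SRAM image,
 * which vcn_v3_0_start_dpg_mode() later hands to PSP through
 * psp_update_vcn_sram(). A simplified model of that indirect path,
 * assuming a cursor like adev->vcn.inst[].dpg_sram_curr_addr:
 *
 *   static void dpg_sram_append(uint32_t **curr, uint32_t offset, uint32_t value)
 *   {
 *       *(*curr)++ = offset;   // register to program
 *       *(*curr)++ = value;    // value PSP will write on our behalf
 *   }
 */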
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( + UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + +static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + + WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF); + } else { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF); + } + + data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS); + data &= ~0x103; + if (adev->pg_flags & 
AMD_PG_SUPPORT_VCN) + data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | + UVD_POWER_STATUS__UVD_PG_EN_MASK; + + WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data); +} + +static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + /* Before power off, this indicator has to be turned on */ + data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS); + data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; + data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data); + + data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); + WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); + + data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT); + SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF); + } +} + +/** + * vcn_v3_0_disable_clock_gating - disable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Disable clock gating for VCN block + */ +static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + /* VCN disable CGC */ + data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__SYS_MASK + | UVD_CGC_GATE__UDEC_MASK + | UVD_CGC_GATE__MPEG2_MASK + | UVD_CGC_GATE__REGS_MASK + | UVD_CGC_GATE__RBC_MASK + | UVD_CGC_GATE__LMI_MC_MASK + | UVD_CGC_GATE__LMI_UMC_MASK + | UVD_CGC_GATE__IDCT_MASK + | UVD_CGC_GATE__MPRD_MASK + | UVD_CGC_GATE__MPC_MASK + | UVD_CGC_GATE__LBSI_MASK + | UVD_CGC_GATE__LRBBM_MASK + | UVD_CGC_GATE__UDEC_RE_MASK + | UVD_CGC_GATE__UDEC_CM_MASK + | UVD_CGC_GATE__UDEC_IT_MASK + | UVD_CGC_GATE__UDEC_DB_MASK + | UVD_CGC_GATE__UDEC_MP_MASK + | UVD_CGC_GATE__WCB_MASK + | UVD_CGC_GATE__VCPU_MASK + | UVD_CGC_GATE__MMSCH_MASK); + + WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data); + + SOC15_WAIT_ON_RREG(VCN, 
inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF); + + data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE); + data |= (UVD_SUVD_CGC_GATE__SRE_MASK + | UVD_SUVD_CGC_GATE__SIT_MASK + | UVD_SUVD_CGC_GATE__SMP_MASK + | UVD_SUVD_CGC_GATE__SCM_MASK + | UVD_SUVD_CGC_GATE__SDB_MASK + | UVD_SUVD_CGC_GATE__SRE_H264_MASK + | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK + | UVD_SUVD_CGC_GATE__SIT_H264_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCM_H264_MASK + | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK + | UVD_SUVD_CGC_GATE__SDB_H264_MASK + | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCLR_MASK + | UVD_SUVD_CGC_GATE__ENT_MASK + | UVD_SUVD_CGC_GATE__IME_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK + | UVD_SUVD_CGC_GATE__SITE_MASK + | UVD_SUVD_CGC_GATE__SRE_VP9_MASK + | UVD_SUVD_CGC_GATE__SCM_VP9_MASK + | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK + | UVD_SUVD_CGC_GATE__SDB_VP9_MASK + | UVD_SUVD_CGC_GATE__IME_HEVC_MASK + | UVD_SUVD_CGC_GATE__EFC_MASK + | UVD_SUVD_CGC_GATE__SAOE_MASK + | UVD_SUVD_CGC_GATE__SRE_AV1_MASK + | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK + | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK + | UVD_SUVD_CGC_GATE__SCM_AV1_MASK + | UVD_SUVD_CGC_GATE__SMPA_MASK); + WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2); + data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK + | UVD_SUVD_CGC_GATE2__MPBE1_MASK + | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK + | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK + | UVD_SUVD_CGC_GATE2__MPC1_MASK); + WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL); + data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK + | UVD_SUVD_CGC_CTRL__EFC_MODE_MASK + | UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK + | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK + | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK + | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK + | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK + | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); + WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data); +} + +static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev, + uint8_t sram_sel, int inst_idx, uint8_t indirect) +{ + uint32_t reg_data = 0; + + /* enable sw clock gating control */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + 
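/*
 * [Editor's note, not from the patch] reg_data is composed with the
 * usual SOC15 field macros: <REG>__<FIELD>__SHIFT positions a value
 * and <REG>__<FIELD>_MASK selects it. The generic read-modify-write
 * pattern behind the lines below, shown on one of the fields used here:
 *
 *   uint32_t v = reg_data;
 *   v &= ~UVD_CGC_CTRL__CLK_OFF_DELAY_MASK;        // clear the old field
 *   v |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;  // insert the new value
 */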
reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + + /* turn off clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect); + + /* turn on SUVD clock gating */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + + /* turn on sw mode in UVD_SUVD_CGC_CTRL */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); +} + +/** + * vcn_v3_0_enable_clock_gating - enable VCN clock gating + * + * @adev: amdgpu_device pointer + * @inst: instance number + * + * Enable clock gating for VCN block + */ +static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst) +{ + uint32_t data; + + /* enable VCN CGC */ + data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); + data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL); + data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK + | UVD_SUVD_CGC_CTRL__EFC_MODE_MASK + | UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK + | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK + | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK + | 
UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK + | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK + | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); + WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data); +} + +static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; + struct amdgpu_ring *ring; + uint32_t rb_bufsz, tmp; + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp); + + if (indirect) + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + + /* enable clock gating */ + vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect); + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect); + + /* setup mmUVD_LMI_CTRL */ + tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MPC_CNTL), + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MPC_SET_MUXA0), + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MPC_SET_MUXB0), + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MPC_SET_MUX), + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); + + vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect); + + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + + /* enable LMI MC and UMC channels */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect); + + /* unblock VCPU register access */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |=
UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + /* add nop to work around PSP size check */ + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + + if (indirect) + psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, + (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - + (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); + + ring = &adev->vcn.inst[inst_idx].ring_dec; + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + + /* set the write pointer delay */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0); + + WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0); + + ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + /* Reset FW shared memory RBC WPTR/RPTR */ + fw_shared->rb.rptr = 0; + fw_shared->rb.wptr = lower_32_bits(ring->wptr); + + /* resetting done, fw can check RB ring */ + fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + return 0; +} + +static int vcn_v3_0_start(struct amdgpu_device *adev) +{ + volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t rb_bufsz, tmp; + int i, j, k, r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } + + /* disable VCN power gating */ + vcn_v3_0_disable_static_power_gating(adev, i); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp); + + /* SW clock gating */ + vcn_v3_0_disable_clock_gating(adev, i); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK,
~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); + + /* setup mmUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL); + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + /* setup mmUVD_MPC_CNTL */ + tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp); + + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + + /* setup mmUVD_MPC_SET_MUX */ + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + + vcn_v3_0_mc_resume(adev, i); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; + + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, i, mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i); + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0); + + ring = &adev->vcn.inst[i].ring_dec; + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, 
RB_RPTR_WR_EN, 1); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp); + + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + + /* program the RB_BASE for ring buffer */ + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0); + + WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0); + ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + fw_shared->rb.wptr = lower_32_bits(ring->wptr); + fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + ring = &adev->vcn.inst[i].ring_enc[1]; + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + } + } + + return 0; +} + +static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) +{ + int i, j; + struct amdgpu_ring *ring; + uint64_t cache_addr; + uint64_t rb_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + + struct mmsch_v3_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v3_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v3_0_cmd_end end = { {0} }; + struct mmsch_v3_0_init_header header; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = + MMSCH_COMMAND__END; + + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2; + for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) { + header.inst[i].init_status = 0; + header.inst[i].table_offset = 0; + header.inst[i].table_size = 0; + } + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + table_size = 0; + + MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + if
(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + offset = 0; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_OFFSET0), + 0); + } else { + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[i].gpu_addr + offset; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(cache_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(cache_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[i].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(cache_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(cache_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; + ring->wptr = 0; + rb_addr = ring->gpu_addr; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_RB_BASE_LO), + lower_32_bits(rb_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_RB_BASE_HI), + upper_32_bits(rb_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_RB_SIZE), + ring->ring_size / 4); + } + + ring = &adev->vcn.inst[i].ring_dec; + ring->wptr = 0; + rb_addr = ring->gpu_addr; + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(rb_addr)); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(rb_addr)); + /* force RBC into idle state */ + tmp = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, + mmUVD_RBC_RB_CNTL), + tmp); + + /* add end packet */ + MMSCH_V3_0_INSERT_END(); + + /* refine header */ + header.inst[i].init_status = 0; + header.inst[i].table_offset = header.total_size; + header.inst[i].table_size = table_size; + header.total_size += table_size; + } + + /* Update init table header in 
memory */ + size = sizeof(struct mmsch_v3_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + /* message MMSCH (in VCN[0]) to initialize this client + * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + /* 2, update vmid of descriptor */ + tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp); + + /* 3, notify mmsch about the size of this descriptor */ + size = header.total_size; + WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + /* 5, kick off the initialization and wait until + * MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + param = 0x10000001; + WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 0; + expected = param + 1; + while (resp != expected) { + resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); + if (resp == expected) + break; + + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for mmMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + + return 0; +} + +static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + uint32_t tmp; + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF); + + tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + return 0; +} + +static int vcn_v3_0_stop(struct amdgpu_device *adev) +{ + uint32_t tmp; + int i, r = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v3_0_stop_dpg_mode(adev, i); + continue; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* disable LMI UMC channel */ + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, 
tmp); + if (r) + return r; + + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* apply soft reset */ + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); + + /* clear status */ + WREG32_SOC15(VCN, i, mmUVD_STATUS, 0); + + /* apply HW clock gating */ + vcn_v3_0_enable_clock_gating(adev, i); + + /* enable VCN power gating */ + vcn_v3_0_enable_static_power_gating(adev, i); + } + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + + return 0; +} + +static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, + int inst_idx, struct dpg_pause_state *new_state) +{ + volatile struct amdgpu_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t reg_data = 0; + int ret_code; + + /* pause/unpause if state is changed */ + if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { + DRM_DEBUG("dpg pause state changed %d -> %d", + adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); + reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + if (!ret_code) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { + /* Restore */ + fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); + ring = &adev->vcn.inst[inst_idx].ring_enc[1]; + ring->wptr = 0; + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, 
inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); + + /* restore wptr/rptr with pointers saved in FW shared memory*/ + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr); + WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr); + } + + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + } + } else { + /* unpause dpg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); + } + adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + +/** + * vcn_v3_0_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR); +} + +/** + * vcn_v3_0_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR); +} + +/** + * vcn_v3_0_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + volatile struct amdgpu_fw_shared *fw_shared; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */ + fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr; + fw_shared->rb.wptr = lower_32_bits(ring->wptr); + WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2, + lower_32_bits(ring->wptr)); + } + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, uint32_t flags) +{ + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE); + amdgpu_ring_write(ring, addr); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP); +} + +static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END); +} + +static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + uint32_t vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB); + amdgpu_ring_write(ring, vmid); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); +} + +static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + 
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, mask); + amdgpu_ring_write(ring, val); +} + +static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring, + uint32_t vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask); +} + +static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, val); +} + +static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_DEC, + .align_mask = 0x3f, + .nop = VCN_DEC_SW_CMD_NO_OP, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = vcn_v3_0_dec_ring_get_rptr, + .get_wptr = vcn_v3_0_dec_ring_get_wptr, + .set_wptr = vcn_v3_0_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fdec_swe x2 vm fdec_swe */ + 1, /* vcn_v3_0_dec_sw_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */ + .emit_ib = vcn_v3_0_dec_sw_ring_emit_ib, + .emit_fence = vcn_v3_0_dec_sw_ring_emit_fence, + .emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_dec_sw_ring_test_ring, + .test_ib = NULL,//amdgpu_vcn_dec_sw_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v3_0_dec_sw_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg, + .emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p) +{ + struct drm_gpu_scheduler **scheds; + + /* The create msg must be in the first IB submitted */ + if (atomic_read(&p->entity->fence_seq)) + return -EINVAL; + + scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] + [AMDGPU_RING_PRIO_DEFAULT].sched; + drm_sched_entity_modify_sched(p->entity, scheds, 1); + return 0; +} + +static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_bo_va_mapping *map; + uint32_t *msg, num_buffers; + struct amdgpu_bo *bo; + uint64_t start, end; + unsigned int i; + void * ptr; + int r; + + addr &= AMDGPU_GMC_HOLE_MASK; + r = amdgpu_cs_find_mapping(p, addr, &bo, &map); + if (r) { + DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); + return r; + } + + start = map->start * AMDGPU_GPU_PAGE_SIZE; + end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; + if (addr & 0x7) { + DRM_ERROR("VCN messages must be 8 byte aligned!\n"); + return -EINVAL; + } + + bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) { + DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); + return r; + } + + r = amdgpu_bo_kmap(bo, &ptr); + if (r) { + DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); + return r; + } + 
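An editorial aside on the validation that follows: vcn_v3_0_dec_msg() refuses to trust the message until it has checked alignment, containment in the mapping, and the message's self-declared length. A minimal user-space model of those checks, with a hypothetical mapped_range struct standing in for the kmapped BO:

#include <stdint.h>
#include <errno.h>

/* Hypothetical stand-in for the kmapped BO and the GPU VA range it covers. */
struct mapped_range {
	uint8_t *cpu_base;   /* CPU mapping of the BO */
	uint64_t gpu_start;  /* first GPU VA covered */
	uint64_t gpu_end;    /* one past the last GPU VA covered */
};

/* Mirror the checks vcn_v3_0_dec_msg() performs before reading fields:
 * 8-byte alignment, the header dwords inside the mapping, and the
 * self-declared size (msg[1]) not running past the mapping's end. */
static int validate_msg(const struct mapped_range *m, uint64_t addr)
{
	const uint32_t *msg;

	if (addr & 0x7)
		return -EINVAL;                 /* must be 8-byte aligned */
	if (addr < m->gpu_start || addr + 16 > m->gpu_end)
		return -EINVAL;                 /* first four dwords must fit */

	msg = (const uint32_t *)(m->cpu_base + (addr - m->gpu_start));
	if (msg[1] > m->gpu_end - addr)
		return -EINVAL;                 /* declared length overflows */
	return 0;
}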
+ msg = ptr + addr - start; + + /* Check length */ + if (msg[1] > end - addr) { + r = -EINVAL; + goto out; + } + + if (msg[3] != RDECODE_MSG_CREATE) + goto out; + + num_buffers = msg[2]; + for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { + uint32_t offset, size, *create; + + if (msg[0] != RDECODE_MESSAGE_CREATE) + continue; + + offset = msg[1]; + size = msg[2]; + + if (offset + size > end) { + r = -EINVAL; + goto out; + } + + create = ptr + addr + offset - start; + + /* H246, HEVC and VP9 can run on any instance */ + if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) + continue; + + r = vcn_v3_0_limit_sched(p); + if (r) + goto out; + } + +out: + amdgpu_bo_kunmap(bo); + return r; +} + +static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, + uint32_t ib_idx) +{ + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); + struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; + uint32_t msg_lo = 0, msg_hi = 0; + unsigned i; + int r; + + /* The first instance can decode anything */ + if (!ring->me) + return 0; + + for (i = 0; i < ib->length_dw; i += 2) { + uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i); + uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1); + + if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) { + msg_lo = val; + } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) { + msg_hi = val; + } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && + val == 0) { + r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo); + if (r) + return r; + } + } + return 0; +} + +static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_DEC, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = vcn_v3_0_dec_ring_get_rptr, + .get_wptr = vcn_v3_0_dec_ring_get_wptr, + .set_wptr = vcn_v3_0_dec_ring_set_wptr, + .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */ + 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */ + .emit_ib = vcn_v2_0_dec_ring_emit_ib, + .emit_fence = vcn_v2_0_dec_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, + .test_ring = vcn_v2_0_dec_ring_test_ring, + .test_ib = amdgpu_vcn_dec_ring_test_ib, + .insert_nop = vcn_v2_0_dec_ring_insert_nop, + .insert_start = vcn_v2_0_dec_ring_insert_start, + .insert_end = vcn_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +/** + * vcn_v3_0_enc_ring_get_rptr - get enc read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware enc read pointer + */ +static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR); + else + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2); +} + +/** + * vcn_v3_0_enc_ring_get_wptr - get enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware enc write pointer + */ +static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == 
&adev->vcn.inst[ring->me].ring_enc[0]) { + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR); + } else { + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2); + } +} + +/** + * vcn_v3_0_enc_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + } + } else { + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + } + } +} + +static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .vmhub = AMDGPU_MMHUB_0, + .get_rptr = vcn_v3_0_enc_ring_get_rptr, + .get_wptr = vcn_v3_0_enc_ring_get_wptr, + .set_wptr = vcn_v3_0_enc_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_enc_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (!DEC_SW_RING_ENABLED) + adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs; + else + adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs; + adev->vcn.inst[i].ring_dec.me = i; + DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i, + DEC_SW_RING_ENABLED?"(Software Ring)":""); + } +} + +static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs; + adev->vcn.inst[i].ring_enc[j].me = i; + } + if (adev->vcn.num_enc_rings > 0) + DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i); + } +} + +static bool vcn_v3_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ret &= 
(RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE); + } + + return ret; +} + +static int vcn_v3_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); + if (ret) + return ret; + } + + return ret; +} + +static int vcn_v3_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + if (enable) { + if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE) + return -EBUSY; + vcn_v3_0_enable_clock_gating(adev, i); + } else { + vcn_v3_0_disable_clock_gating(adev, i); + } + } + + return 0; +} + +static int vcn_v3_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + /* for SRIOV, guest should not control VCN Power-gating + * MMSCH FW should control Power-gating and clock-gating + * guest should avoid touching CGC and PG + */ + if (amdgpu_sriov_vf(adev)) { + adev->vcn.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + + if(state == adev->vcn.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = vcn_v3_0_stop(adev); + else + ret = vcn_v3_0_start(adev); + + if(!ret) + adev->vcn.cur_state = state; + + return ret; +} + +static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + + DRM_DEBUG("IH: VCN TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); + break; + case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); + break; + case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = { + .set = vcn_v3_0_set_interrupt_state, + .process = vcn_v3_0_process_interrupt, +}; + +static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs; + } +} + +static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { + .name = "vcn_v3_0", + .early_init = vcn_v3_0_early_init, + .late_init = NULL, + .sw_init = vcn_v3_0_sw_init, + .sw_fini = vcn_v3_0_sw_fini, + .hw_init = vcn_v3_0_hw_init, + .hw_fini = vcn_v3_0_hw_fini, + .suspend = vcn_v3_0_suspend, + .resume = 
vcn_v3_0_resume, + .is_idle = vcn_v3_0_is_idle, + .wait_for_idle = vcn_v3_0_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v3_0_set_clockgating_state, + .set_powergating_state = vcn_v3_0_set_powergating_state, +}; + +const struct amdgpu_ip_block_version vcn_v3_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vcn_v3_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.h new file mode 100644 index 000000000000..31683582d778 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCN_V3_0_H__ +#define __VCN_V3_0_H__ + +extern const struct amdgpu_ip_block_version vcn_v3_0_ip_block; + +#endif /* __VCN_V3_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 5cb7e231de5f..a9ca6988009e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -38,129 +38,121 @@ static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev); /** - * vega10_ih_enable_interrupts - Enable the interrupt ring buffer + * vega10_ih_init_register_offset - Initialize register offset for ih rings * * @adev: amdgpu_device pointer * - * Enable the interrupt ring buffer (VEGA10). + * Initialize register offset ih rings (VEGA10). 
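Worth pausing on the refactor that begins here: the old vega10 code selected registers with if/else chains on the ring pointer, while the new code fills a per-ring amdgpu_ih_regs table once and lets every helper work on any ring. A compressed sketch of the pattern, using simplified stand-in types:

/* Per-ring register offsets, cached once at init (stand-in types). */
struct ih_regs_model {
	unsigned rb_cntl, rb_rptr, rb_wptr;
};

struct ih_ring_model {
	unsigned ring_size;
	struct ih_regs_model regs;
};

/* Before: if (ih == &ih0) reg = mmIH_RB_WPTR; else if (ih == &ih1) ...
 * After: every helper indexes through ih->regs, so one implementation
 * covers ring 0, ring 1 and ring 2 alike. */
static unsigned ih_wptr_reg(const struct ih_ring_model *ih)
{
	return ih->regs.rb_wptr;
}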
*/ -static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) +static void vega10_ih_init_register_offset(struct amdgpu_device *adev) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + struct amdgpu_ih_regs *ih_regs; + + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; } - adev->irq.ih.enabled = true; if (adev->irq.ih1.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, - RB_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); - } - adev->irq.ih1.enabled = true; + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; } if (adev->irq.ih2.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, - RB_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } - adev->irq.ih2.enabled = true; + ih_regs = &adev->irq.ih2.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2; } } /** - * vega10_ih_disable_interrupts - Disable the interrupt ring buffer + * vega10_ih_toggle_ring_interrupts - toggle the interrupt ring buffer * * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointet + * @enable: true - 
enable the interrupts, false - disable the interrupts * - * Disable the interrupt ring buffer (VEGA10). + * Toggle the interrupt ring buffer (VEGA10) */ -static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) +static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) { - u32 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); - return; + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; } } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + WREG32(ih_regs->ih_rb_cntl, tmp); } - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - adev->irq.ih.enabled = false; - adev->irq.ih.rptr = 0; - - if (adev->irq.ih1.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, - RB_ENABLE, 0); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); - } + if (enable) { + ih->enabled = true; + } else { /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); - adev->irq.ih1.enabled = false; - adev->irq.ih1.rptr = 0; + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; } - if (adev->irq.ih2.ring_size) { - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, - RB_ENABLE, 0); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } + return 0; +} - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); - adev->irq.ih2.enabled = false; - adev->irq.ih2.rptr = 0; +/** + * vega10_ih_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (VEGA10). 
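REG_SET_FIELD() used throughout these helpers is the SOC15 read-modify-write idiom built on generated _MASK/__SHIFT constants. A self-contained model of the macro pair, assuming two hypothetical field definitions for IH_RB_CNTL:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical generated constants for two fields of one register. */
#define IH_RB_CNTL__RB_ENABLE_MASK   0x00000001U
#define IH_RB_CNTL__RB_ENABLE__SHIFT 0
#define IH_RB_CNTL__RB_SIZE_MASK     0x0000003EU
#define IH_RB_CNTL__RB_SIZE__SHIFT   1

/* Same shape as the kernel's REG_SET_FIELD(): clear the named field,
 * then insert the new value at the field's shift. */
#define SET_FIELD(val, reg, field, v) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(v) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t tmp = 0;

	tmp = SET_FIELD(tmp, IH_RB_CNTL, RB_SIZE, 9);   /* 2^9 dwords */
	tmp = SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, 1); /* turn ring on */
	printf("IH_RB_CNTL = 0x%08x\n", tmp);           /* prints 0x00000013 */
	return 0;
}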
+ */ +static int vega10_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = vega10_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } } + + return 0; } static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) @@ -206,143 +198,108 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) } /** - * vega10_ih_irq_init - init and enable the interrupt ring + * vega10_ih_enable_ring - enable an ih ring buffer * * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer * - * Allocate a ring buffer for the interrupt controller, - * enable the RLC, disable interrupts, enable the IH - * ring buffer and enable it (VI). - * Called at device load and reume. - * Returns 0 for success, errors for failure. + * Enable an ih ring buffer (VEGA10) */ -static int vega10_ih_irq_init(struct amdgpu_device *adev) +static int vega10_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - struct amdgpu_ih_ring *ih; - u32 ih_rb_cntl, ih_chicken; - int ret = 0; - u32 tmp; + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; - /* disable irqs */ - vega10_ih_disable_interrupts(adev); - - adev->nbio.funcs->ih_control(adev); + ih_regs = &ih->ih_regs; - ih = &adev->irq.ih; /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - if (adev->irq.ih.use_bus_addr) { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); - } else { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1); - } - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, - !!adev->irq.msi_enabled); + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = vega10_ih_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { - DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); return -ETIMEDOUT; } } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); + WREG32(ih_regs->ih_rb_cntl, tmp); } - if ((adev->asic_type == CHIP_ARCTURUS - && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) - || adev->asic_type == CHIP_RENOIR) - WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); - - /* set the writeback address whether it's enabled or not */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, - lower_32_bits(ih->wptr_addr)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, - upper_32_bits(ih->wptr_addr) & 0xFFFF); + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } /* set 
rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); - - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, - vega10_ih_doorbell_rptr(ih)); - - ih = &adev->irq.ih1; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - WPTR_OVERFLOW_ENABLE, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, - RB_FULL_DRAIN_ENABLE, 1); - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); - } + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); + WREG32(ih_regs->ih_doorbell_rptr, vega10_ih_doorbell_rptr(ih)); - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1, - vega10_ih_doorbell_rptr(ih)); - } + return 0; +} - ih = &adev->irq.ih2; - if (ih->ring_size) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2, - (ih->gpu_addr >> 40) & 0xff); - - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); - ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - - if (amdgpu_sriov_vf(adev)) { - if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, - ih_rb_cntl)) { - DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); - return -ETIMEDOUT; - } - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); - } + +/** + * vega10_ih_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it (VEGA10). + * Called at device load and resume. + * Returns 0 for success, errors for failure.
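The rewritten init path that follows iterates a short array of ring pointers instead of duplicating the ring-1 and ring-2 blocks, skipping unallocated rings and aborting on the first failure. A minimal model of that loop shape, with a stub standing in for vega10_ih_enable_ring():

struct ring_model { unsigned ring_size; };

/* Stub standing in for vega10_ih_enable_ring(); the real function
 * programs base, cntl, writeback and doorbell registers. */
static int enable_ring(struct ring_model *r)
{
	(void)r;
	return 0;
}

static int init_all_rings(struct ring_model *r0, struct ring_model *r1,
			  struct ring_model *r2)
{
	struct ring_model *ih[] = { r0, r1, r2 };
	unsigned long i;
	int ret;

	for (i = 0; i < sizeof(ih) / sizeof(ih[0]); i++) {
		if (!ih[i]->ring_size)  /* unallocated rings are skipped */
			continue;
		ret = enable_ring(ih[i]);
		if (ret)                /* first failure aborts the sequence */
			return ret;
	}
	return 0;
}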
+ */ +static int vega10_ih_irq_init(struct amdgpu_device *adev) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + u32 ih_chicken; + int ret; + int i; - /* set rptr, wptr to 0 */ - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); + /* disable irqs */ + ret = vega10_ih_toggle_interrupts(adev, false); + if (ret) + return ret; - WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2, - vega10_ih_doorbell_rptr(ih)); - } + adev->nbio.funcs->ih_control(adev); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); + if (adev->asic_type == CHIP_RENOIR) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + } - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + ret = vega10_ih_enable_ring(adev, ih[i]); + if (ret) + return ret; + } + } pci_set_master(adev->pdev); /* enable interrupts */ - vega10_ih_enable_interrupts(adev); + ret = vega10_ih_toggle_interrupts(adev, true); + if (ret) + return ret; + + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; - return ret; + return 0; } /** @@ -354,7 +311,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) */ static void vega10_ih_irq_disable(struct amdgpu_device *adev) { - vega10_ih_disable_interrupts(adev); + vega10_ih_toggle_interrupts(adev, false); /* Wait and acknowledge irq */ mdelay(1); @@ -364,6 +321,7 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev) * vega10_ih_get_wptr - get the IH ring buffer wptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to fetch wptr * * Get the IH ring buffer wptr from either the register * or the writeback memory buffer (VEGA10). Also check for @@ -373,25 +331,23 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev) static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - u32 wptr, reg, tmp; + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } - /* Double check that the overflow wasn't already cleared. */ + ih_regs = &ih->ih_regs; - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); - else - BUG(); - - wptr = RREG32_NO_KIQ(reg); + /* Double check that the overflow wasn't already cleared. 
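The catch-up step here resumes reading at (wptr + 32) & ptr_mask, one 32-byte IV past the newest entry, deliberately dropping the overwritten vectors to regain sync. The pointer arithmetic, modelled on a power-of-two ring:

#include <stdint.h>
#include <assert.h>

#define IV_SIZE 32u                      /* one interrupt vector, in bytes */

/* After an overflow the oldest intact entry sits one IV past wptr;
 * everything between the stale rptr and there has been overwritten. */
static uint32_t overflow_resume_rptr(uint32_t wptr, uint32_t ring_bytes)
{
	uint32_t ptr_mask = ring_bytes - 1;  /* ring size is a power of two */

	return (wptr + IV_SIZE) & ptr_mask;
}

int main(void)
{
	/* 1 KiB ring, writer wrapped to byte 96: resume reading at 128. */
	assert(overflow_resume_rptr(96, 1024) == 128);
	/* wptr at the last IV wraps the resume point back to 0. */
	assert(overflow_resume_rptr(1024 - IV_SIZE, 1024) == 0);
	return 0;
}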
*/ + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -407,91 +363,32 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, wptr, ih->rptr, tmp); ih->rptr = tmp; - if (ih == &adev->irq.ih) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); - else if (ih == &adev->irq.ih1) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); - else if (ih == &adev->irq.ih2) - reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); - else - BUG(); - - tmp = RREG32_NO_KIQ(reg); + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32_NO_KIQ(reg, tmp); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } /** - * vega10_ih_decode_iv - decode an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Decodes the interrupt vector at the current rptr - * position and also advance the position. - */ -static void vega10_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - struct amdgpu_iv_entry *entry) -{ - /* wptr/rptr are in bytes! */ - u32 ring_index = ih->rptr >> 2; - uint32_t dw[8]; - - dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); - dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); - dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); - dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); - dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); - dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); - dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); - dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); - - entry->client_id = dw[0] & 0xff; - entry->src_id = (dw[0] >> 8) & 0xff; - entry->ring_id = (dw[0] >> 16) & 0xff; - entry->vmid = (dw[0] >> 24) & 0xf; - entry->vmid_src = (dw[0] >> 31); - entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32); - entry->timestamp_src = dw[2] >> 31; - entry->pasid = dw[3] & 0xffff; - entry->pasid_src = dw[3] >> 31; - entry->src_data[0] = dw[4]; - entry->src_data[1] = dw[5]; - entry->src_data[2] = dw[6]; - entry->src_data[3] = dw[7]; - - /* wptr/rptr are in bytes! */ - ih->rptr += 32; -} - -/** * vega10_ih_irq_rearm - rearm IRQ if lost * * @adev: amdgpu_device pointer + * @ih: IH ring to match * */ static void vega10_ih_irq_rearm(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - uint32_t reg_rptr = 0; uint32_t v = 0; uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; - if (ih == &adev->irq.ih) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); - else if (ih == &adev->irq.ih1) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); - else if (ih == &adev->irq.ih2) - reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); - else - return; - + ih_regs = &ih->ih_regs; /* Rearm IRQ / re-wwrite doorbell if doorbell write is lost */ for (i = 0; i < MAX_REARM_RETRY; i++) { - v = RREG32_NO_KIQ(reg_rptr); + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); if ((v < ih->ring_size) && (v != ih->rptr)) WDOORBELL32(ih->doorbell_index, ih->rptr); else @@ -503,12 +400,15 @@ static void vega10_ih_irq_rearm(struct amdgpu_device *adev, * vega10_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer + * @ih: IH ring buffer to set rptr * * Set the IH ring buffer rptr. 
*/ static void vega10_ih_set_rptr(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + struct amdgpu_ih_regs *ih_regs; + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; @@ -516,12 +416,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) vega10_ih_irq_rearm(adev, ih); - } else if (ih == &adev->irq.ih) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); - } else if (ih == &adev->irq.ih1) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr); - } else if (ih == &adev->irq.ih2) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); } } @@ -538,15 +435,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; @@ -590,20 +483,28 @@ static int vega10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); - if (r) - return r; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; - adev->irq.ih1.use_doorbell = true; - adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; + + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + } + /* initialize ih control registers offset */ + vega10_ih_init_register_offset(adev); + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); if (r) return r; - adev->irq.ih2.use_doorbell = true; - adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; - r = amdgpu_irq_init(adev); return r; @@ -613,10 +514,7 @@ static int vega10_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih2); - amdgpu_ih_ring_fini(adev, &adev->irq.ih1); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); return 0; } @@ -684,15 +582,11 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev, def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); field_val = enable ? 0 : 1; /** - * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE - * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. + * Vega10/12 and RAVEN don't have IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. */ - if (adev->asic_type > CHIP_VEGA10) { - data = REG_SET_FIELD(data, IH_CLK_CTRL, - IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + if (adev->asic_type == CHIP_RENOIR) data = REG_SET_FIELD(data, IH_CLK_CTRL, IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); - } data = REG_SET_FIELD(data, IH_CLK_CTRL, DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); @@ -715,7 +609,7 @@ static int vega10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; vega10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } @@ -745,7 +639,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, - .decode_iv = vega10_ih_decode_iv, + .decode_iv = amdgpu_ih_decode_iv_helper, .set_rptr = vega10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c new file mode 100644 index 000000000000..f51dfc38ac65 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -0,0 +1,706 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "soc15.h" + +#include "oss/osssys_4_2_0_offset.h" +#include "oss/osssys_4_2_0_sh_mask.h" + +#include "soc15_common.h" +#include "vega20_ih.h" + +#define MAX_REARM_RETRY 10 + +#define mmIH_CHICKEN_ALDEBARAN 0x18d +#define mmIH_CHICKEN_ALDEBARAN_BASE_IDX 0 + +static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +/** + * vega20_ih_init_register_offset - Initialize register offset for ih rings + * + * @adev: amdgpu_device pointer + * + * Initialize register offset ih rings (VEGA20). 
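Note the local mmIH_CHICKEN_ALDEBARAN define above: the osssys 4.2.0 headers this file includes predate that register, so the offset is supplied in-file and combined with the per-IP base the same way SOC15_REG_OFFSET() composes its operands. A toy model of that base-plus-offset composition (the base value is made up):

#include <stdint.h>
#include <stdio.h>

/* Made-up base address table: one segment per IP block instance. */
static const uint32_t osssys_base[] = { 0x4280 };

/* Locally defined offset, as vega20_ih.c does for IH_CHICKEN_ALDEBARAN. */
#define mmIH_CHICKEN_MODEL          0x18d
#define mmIH_CHICKEN_MODEL_BASE_IDX 0

/* Same composition SOC15_REG_OFFSET() performs: segment base + offset. */
#define REG_OFFSET(reg) (osssys_base[reg##_BASE_IDX] + reg)

int main(void)
{
	printf("IH_CHICKEN at dword offset 0x%x\n",
	       REG_OFFSET(mmIH_CHICKEN_MODEL));
	return 0;
}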
+ */ +static void vega20_ih_init_register_offset(struct amdgpu_device *adev) +{ + struct amdgpu_ih_regs *ih_regs; + + if (adev->irq.ih.ring_size) { + ih_regs = &adev->irq.ih.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR); + ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO); + ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL; + } + + if (adev->irq.ih1.ring_size) { + ih_regs = &adev->irq.ih1.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING1); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING1); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1; + } + + if (adev->irq.ih2.ring_size) { + ih_regs = &adev->irq.ih2.ih_regs; + ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_RING2); + ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI_RING2); + ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); + ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2); + ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING2; + } +} + +/** + * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * @enable: true - enable the interrupts, false - disable the interrupts + * + * Toggle the interrupt ring buffer (VEGA20) + */ +static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, + bool enable) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + + /* enable_intr field is only valid in ring0 */ + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); + if (amdgpu_sriov_vf(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (enable) { + ih->enabled = true; + } else { + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_rptr, 0); + WREG32(ih_regs->ih_rb_wptr, 0); + ih->enabled = false; + ih->rptr = 0; + } + + return 0; +} + +/** + * vega20_ih_toggle_interrupts - Toggle all the available interrupt ring buffers + * + * @adev: amdgpu_device pointer + * @enable: enable or disable interrupt ring buffers + * + * Toggle all the available interrupt ring buffers (VEGA20). 
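The vega20_ih_rb_cntl() helper just below encodes the ring size as order_base_2(ring_size / 4), i.e. the log2 of the size in dwords, which is what the RB_SIZE field expects. A standalone check of that encoding, using a local ilog2 since the inputs are exact powers of two:

#include <stdint.h>
#include <assert.h>

/* log2 of a power of two, standing in for the kernel's order_base_2()
 * (which rounds up; the inputs here are exact powers of two). */
static unsigned ilog2_u32(uint32_t v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* 256 KiB ring buffer = 65536 dwords = 2^16. */
	assert(ilog2_u32(256 * 1024 / 4) == 16);
	/* 4 KiB page-sized ring = 1024 dwords = 2^10. */
	assert(ilog2_u32(4096 / 4) == 10);
	return 0;
}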
+ */ +static int vega20_ih_toggle_interrupts(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + int i; + int r; + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + r = vega20_ih_toggle_ring_interrupts(adev, ih[i], enable); + if (r) + return r; + } + } + + return 0; +} + +static uint32_t vega20_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) +{ + int rb_bufsz = order_base_2(ih->ring_size / 4); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + MC_SPACE, ih->use_bus_addr ? 1 : 4); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register + * value is written to memory + */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_WRITEBACK_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); + + return ih_rb_cntl; +} + +static uint32_t vega20_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) +{ + u32 ih_doorbell_rtpr = 0; + + if (ih->use_doorbell) { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + ih->doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 1); + } else { + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, + ENABLE, 0); + } + return ih_doorbell_rtpr; +} + +/** + * vega20_ih_enable_ring - enable an ih ring buffer + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Enable an ih ring buffer (VEGA20) + */ +static int vega20_ih_enable_ring(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + uint32_t tmp; + + ih_regs = &ih->ih_regs; + + /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ + WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8); + WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff); + + tmp = RREG32(ih_regs->ih_rb_cntl); + tmp = vega20_ih_rb_cntl(ih, tmp); + if (ih == &adev->irq.ih) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); + if (ih == &adev->irq.ih1) + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); + if (amdgpu_sriov_vf(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } + + if (ih == &adev->irq.ih) { + /* set the ih ring 0 writeback address whether it's enabled or not */ + WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr)); + WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF); + } + + /* set rptr, wptr to 0 */ + WREG32(ih_regs->ih_rb_wptr, 0); + WREG32(ih_regs->ih_rb_rptr, 0); + + WREG32(ih_regs->ih_doorbell_rptr, vega20_ih_doorbell_rptr(ih)); + + return 0; +} + +/** + * vega20_ih_reroute_ih - reroute VMC/UTCL2 ih to an ih ring + * + * @adev: amdgpu_device pointer + * + * Reroute VMC and UMC interrupts on the primary ih ring to + * ih ring 1 so they won't be lost when bursts of page fault + * interrupts overwhelm the interrupt handler (VEGA20) + */ +static void vega20_ih_reroute_ih(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* vega20 ih reroute will go through psp; this + * function is used for newer asics starting with arcturus + */ + if (adev->asic_type >= CHIP_ARCTURUS) { + /* Reroute to IH ring 1 for VMC */ + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12); + tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); + + /* Reroute IH ring 1 for UTCL2 */ + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B); + tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp); + } +} + +/** + * vega20_ih_irq_init - init and enable the interrupt ring + * + * @adev: amdgpu_device pointer + * + * Allocate a ring buffer for the interrupt controller, + * enable the RLC, disable interrupts, enable the IH + * ring buffer and enable it (VEGA20). + * Called at device load and resume. + * Returns 0 for success, errors for failure.
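vega20_ih_reroute_ih() above uses a classic index/data register pair: write the client id (0x12 for VMC, 0x1B for UTCL2) to IH_CLIENT_CFG_INDEX, then read-modify-write IH_CLIENT_CFG_DATA so that client targets ring 1. A model of the access pattern against a fake register file (the field layout is hypothetical):

#include <stdint.h>

/* Fake backing store: 32 indexed client-config entries. */
static uint32_t cfg_table[32];
static uint32_t cfg_index;

static void wreg_index(uint32_t v) { cfg_index = v & 0x1f; }
static uint32_t rreg_data(void)    { return cfg_table[cfg_index]; }
static void wreg_data(uint32_t v)  { cfg_table[cfg_index] = v; }

#define RING_ID_MASK 0x3u   /* hypothetical field layout */

/* Point one interrupt client at a different ring, the way
 * vega20_ih_reroute_ih() retargets VMC and UTCL2 to ring 1. */
static void reroute_client(uint32_t client, uint32_t ring)
{
	uint32_t tmp;

	wreg_index(client);          /* select the entry */
	tmp = rreg_data();           /* read-modify-write it */
	tmp = (tmp & ~RING_ID_MASK) | (ring & RING_ID_MASK);
	wreg_data(tmp);
}

int main(void)
{
	reroute_client(0x12, 1);     /* VMC -> ring 1 */
	reroute_client(0x1B, 1);     /* UTCL2 -> ring 1 */
	return 0;
}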
+ */ +static int vega20_ih_irq_init(struct amdgpu_device *adev) +{ + struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; + u32 ih_chicken; + int ret; + int i; + + /* disable irqs */ + ret = vega20_ih_toggle_interrupts(adev, false); + if (ret) + return ret; + + adev->nbio.funcs->ih_control(adev); + + if (adev->asic_type == CHIP_ARCTURUS && + adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + } + + /* psp firmware won't program IH_CHICKEN for aldebaran; + * the driver needs to program it properly according to + * the MC_SPACE type in IH_RB_CNTL */ + if (adev->asic_type == CHIP_ALDEBARAN) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken); + } + + for (i = 0; i < ARRAY_SIZE(ih); i++) { + if (ih[i]->ring_size) { + if (i == 1) + vega20_ih_reroute_ih(adev); + ret = vega20_ih_enable_ring(adev, ih[i]); + if (ret) + return ret; + } + } + + pci_set_master(adev->pdev); + + /* enable interrupts */ + ret = vega20_ih_toggle_interrupts(adev, true); + if (ret) + return ret; + + if (adev->irq.ih_soft.ring_size) + adev->irq.ih_soft.enabled = true; + + return 0; +} + +/** + * vega20_ih_irq_disable - disable interrupts + * + * @adev: amdgpu_device pointer + * + * Disable interrupts on the hw (VEGA20). + */ +static void vega20_ih_irq_disable(struct amdgpu_device *adev) +{ + vega20_ih_toggle_interrupts(adev, false); + + /* Wait and acknowledge irq */ + mdelay(1); +} + +/** + * vega20_ih_get_wptr - get the IH ring buffer wptr + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Get the IH ring buffer wptr from either the register + * or the writeback memory buffer (VEGA20). Also check for + * ring buffer overflow and deal with it. + * Returns the value of the wptr. + */ +static u32 vega20_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + u32 wptr, tmp; + struct amdgpu_ih_regs *ih_regs; + + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. */ + wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happens, start parsing interrupts + * from the last vector not overwritten (wptr + 32). Hopefully + * this should allow us to catch up.
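Note the split in vega20_ih_get_wptr() above: ring 0 reads its write pointer from a coherent writeback slot and only falls back to the register when the overflow bit is set, while rings 1 and 2 always take the register path. A sketch of that fast-path/slow-path structure, with the MMIO read stubbed and a hypothetical placement of the overflow flag:

#include <stdint.h>

#define RB_OVERFLOW_BIT 0x1u   /* hypothetical placement of the flag */

/* Stub standing in for RREG32_NO_KIQ(ih_regs->ih_rb_wptr). */
static uint32_t mmio_read_wptr(void) { return 0x120; }

/* Fast path: trust the DMA'd writeback value unless it carries the
 * overflow flag; slow path: read the register, which is authoritative
 * but costs an MMIO round trip. */
static uint32_t get_wptr(const volatile uint32_t *wptr_cpu, int has_writeback)
{
	uint32_t wptr;

	if (has_writeback) {
		wptr = *wptr_cpu;               /* no MMIO round trip */
		if (!(wptr & RB_OVERFLOW_BIT))
			return wptr;
	}
	return mmio_read_wptr();                /* overflow handling follows */
}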
+ */ + tmp = (wptr + 32) & ih->ptr_mask; + dev_warn(adev->dev, "IH ring buffer overflow " + "(0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, tmp); + ih->rptr = tmp; + + tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + +out: + return (wptr & ih->ptr_mask); +} + +/** + * vega20_ih_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Rearm the IRQ by re-writing the doorbell in case the doorbell write was lost. + */ +static void vega20_ih_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t v = 0; + uint32_t i = 0; + struct amdgpu_ih_regs *ih_regs; + + ih_regs = &ih->ih_regs; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + +/** + * vega20_ih_set_rptr - set the IH ring buffer rptr + * + * @adev: amdgpu_device pointer + * @ih: amdgpu_ih_ring pointer + * + * Set the IH ring buffer rptr. + */ +static void vega20_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + struct amdgpu_ih_regs *ih_regs; + + if (ih->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + *ih->rptr_cpu = ih->rptr; + WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + vega20_ih_irq_rearm(adev, ih); + } else { + ih_regs = &ih->ih_regs; + WREG32(ih_regs->ih_rb_rptr, ih->rptr); + } +} + +/** + * vega20_ih_self_irq - dispatch work for ring 1 and 2 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. + */ +static int vega20_ih_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + switch (entry->ring_id) { + case 1: + schedule_work(&adev->irq.ih1_work); + break; + case 2: + schedule_work(&adev->irq.ih2_work); + break; + default: break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs vega20_ih_self_irq_funcs = { + .process = vega20_ih_self_irq, +}; + +static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs; +} + +static int vega20_ih_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega20_ih_set_interrupt_funcs(adev); + vega20_ih_set_self_irq_funcs(adev); + return 0; +} + +static int vega20_ih_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + if (r) + return r; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); + if (r) + return r; + + adev->irq.ih.use_doorbell = true; + adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; + + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + + /* initialize ih control registers offset */ + vega20_ih_init_register_offset(adev); + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true); + if (r) +
return r; + + r = amdgpu_irq_init(adev); + + return r; +} + +static int vega20_ih_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_fini_sw(adev); + + return 0; +} + +static int vega20_ih_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vega20_ih_irq_init(adev); + if (r) + return r; + + return 0; +} + +static int vega20_ih_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega20_ih_irq_disable(adev); + + return 0; +} + +static int vega20_ih_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return vega20_ih_hw_fini(adev); +} + +static int vega20_ih_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return vega20_ih_hw_init(adev); +} + +static bool vega20_ih_is_idle(void *handle) +{ + /* todo */ + return true; +} + +static int vega20_ih_wait_for_idle(void *handle) +{ + /* todo */ + return -ETIMEDOUT; +} + +static int vega20_ih_soft_reset(void *handle) +{ + /* todo */ + + return 0; +} + +static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); + field_val = enable ? 0 : 1; + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data); + } +} + +static int vega20_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega20_ih_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE); + return 0; +} + +static int vega20_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs vega20_ih_ip_funcs = { + .name = "vega20_ih", + .early_init = vega20_ih_early_init, + .late_init = NULL, + .sw_init = vega20_ih_sw_init, + .sw_fini = vega20_ih_sw_fini, + .hw_init = vega20_ih_hw_init, + .hw_fini = vega20_ih_hw_fini, + .suspend = vega20_ih_suspend, + .resume = vega20_ih_resume, + .is_idle = vega20_ih_is_idle, + .wait_for_idle = vega20_ih_wait_for_idle, + .soft_reset = vega20_ih_soft_reset, + .set_clockgating_state = vega20_ih_set_clockgating_state, + .set_powergating_state = vega20_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs vega20_ih_funcs = { + .get_wptr = vega20_ih_get_wptr, + .decode_iv = amdgpu_ih_decode_iv_helper, + .set_rptr = vega20_ih_set_rptr +}; + +static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + adev->irq.ih_funcs = &vega20_ih_funcs; +} + +const struct amdgpu_ip_block_version vega20_ih_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &vega20_ih_ip_funcs, +};
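The ring-buffer overflow recovery in vega20_ih_get_wptr() above is easiest to see in isolation: when the RB_OVERFLOW bit is set, the reader assumes the 32 entries just behind the write pointer were clobbered and resumes at (wptr + 32) & ptr_mask. The following is a minimal, self-contained userspace model of that recovery; the struct and helper names are illustrative stand-ins for this sketch, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES  256u              /* power of two, as with the real IH ring */
#define PTR_MASK      (RING_ENTRIES - 1)
#define OVERFLOW_SKIP 32u               /* entries assumed overwritten on overflow */

struct ih_ring_model {                  /* illustrative stand-in for amdgpu_ih_ring */
	uint32_t rptr;
	uint32_t wptr;
	int overflow;                   /* models the IH_RB_WPTR.RB_OVERFLOW bit */
};

/* Mirrors the recovery path in vega20_ih_get_wptr(): on overflow, resume
 * reading at the oldest entry assumed to still be intact (wptr + 32). */
static uint32_t model_get_wptr(struct ih_ring_model *ih)
{
	if (ih->overflow) {
		uint32_t resume = (ih->wptr + OVERFLOW_SKIP) & PTR_MASK;

		fprintf(stderr, "overflow: rptr 0x%08X -> 0x%08X\n", ih->rptr, resume);
		ih->rptr = resume;
		ih->overflow = 0;       /* the WPTR_OVERFLOW_CLEAR write in the driver */
	}
	return ih->wptr & PTR_MASK;
}

int main(void)
{
	struct ih_ring_model ih = { .rptr = 10, .wptr = 8, .overflow = 1 };
	uint32_t wptr = model_get_wptr(&ih);

	/* After recovery the reader trails the writer by RING_ENTRIES - 32 entries. */
	printf("wptr=%u rptr=%u pending=%u\n", wptr, ih.rptr, (wptr - ih.rptr) & PTR_MASK);
	return 0;
}

The 32-entry skip trades a handful of possibly-valid entries for the guarantee that parsing restarts on data the writer has not touched; the dev_warn() in the driver makes the dropped window visible in the log.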
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.h b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h new file mode 100644 index 000000000000..7af6d8758ee3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VEGA20_IH_H__ +#define __VEGA20_IH_H__ + +extern const struct amd_ip_funcs vega20_ih_ip_funcs; +extern const struct amdgpu_ip_block_version vega20_ih_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index f1b171e30774..fe9a7cc8d9eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -24,6 +24,8 @@ #include <linux/pci.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> + #include "amdgpu.h" #include "amdgpu_atombios.h" #include "amdgpu_ih.h" @@ -75,10 +77,221 @@ #if defined(CONFIG_DRM_AMD_ACP) #include "amdgpu_acp.h" #endif -#include "dce_virtual.h" +#include "amdgpu_vkms.h" #include "mxgpu_vi.h" #include "amdgpu_dm.h" +#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6 +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L +#define ixPCIE_L1_PM_SUB_CNTL 0x378 +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L +#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L +#define LINK_CAP 0x64 +#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define ixCPM_CONTROL 0x1400118 +#define ixPCIE_LC_CNTL7 0x100100BC +#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L +#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007 +#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009 +#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L +#define PCIE_L1_PM_SUB_CNTL 0x378 +#define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \ + (asic_type <= CHIP_POLARIS12) && \ + (rid >= 0x6E)) +/* Topaz */ +static const struct amdgpu_video_codecs topaz_video_codecs_encode = +{ + .codec_count = 0, + .codec_array = NULL, +}; + +/* Tonga, CZ, ST, Fiji */ +static const struct amdgpu_video_codec_info
tonga_video_codecs_encode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 2304, + .max_pixels_per_frame = 4096 * 2304, + .max_level = 0, + }, +}; + +static const struct amdgpu_video_codecs tonga_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array), + .codec_array = tonga_video_codecs_encode_array, +}; + +/* Polaris */ +static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 2304, + .max_pixels_per_frame = 4096 * 2304, + .max_level = 0, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, + .max_width = 4096, + .max_height = 2304, + .max_pixels_per_frame = 4096 * 2304, + .max_level = 0, + }, +}; + +static const struct amdgpu_video_codecs polaris_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array), + .codec_array = polaris_video_codecs_encode_array, +}; + +/* Topaz */ +static const struct amdgpu_video_codecs topaz_video_codecs_decode = +{ + .codec_count = 0, + .codec_array = NULL, +}; + +/* Tonga */ +static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 3, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 5, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 52, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 4, + }, +}; + +static const struct amdgpu_video_codecs tonga_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array), + .codec_array = tonga_video_codecs_decode_array, +}; + +/* CZ, ST, Fiji, Polaris */ +static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 3, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 5, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 52, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 4, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 186, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 0, + }, +}; + +static const struct amdgpu_video_codecs cz_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array), + .codec_array = cz_video_codecs_decode_array, +}; + +static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode, + const struct amdgpu_video_codecs **codecs) +{ + switch 
(adev->asic_type) { + case CHIP_TOPAZ: + if (encode) + *codecs = &topaz_video_codecs_encode; + else + *codecs = &topaz_video_codecs_decode; + return 0; + case CHIP_TONGA: + if (encode) + *codecs = &tonga_video_codecs_encode; + else + *codecs = &tonga_video_codecs_decode; + return 0; + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + if (encode) + *codecs = &polaris_video_codecs_encode; + else + *codecs = &cz_video_codecs_decode; + return 0; + case CHIP_FIJI: + case CHIP_CARRIZO: + case CHIP_STONEY: + if (encode) + *codecs = &tonga_video_codecs_encode; + else + *codecs = &cz_video_codecs_decode; + return 0; + default: + return -EINVAL; + } +} + /* * Indirect registers accessor */ @@ -448,27 +661,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static void vi_detect_hw_virtualization(struct amdgpu_device *adev) -{ - uint32_t reg = 0; - - if (adev->asic_type == CHIP_TONGA || - adev->asic_type == CHIP_FIJI) { - reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - /* bit0: 0 means pf and 1 means vf */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - /* bit31: 0 means disable IOV and 1 means enable */ - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - } - - if (reg == 0) { - if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } -} - static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { {mmGRBM_STATUS}, {mmGRBM_STATUS2}, @@ -663,11 +855,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) +/** + * vi_asic_pci_config_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Use PCI Config method to reset the GPU. + * + * Returns 0 for success. 
+ */ +static int vi_asic_pci_config_reset(struct amdgpu_device *adev) { u32 i; + int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); /* disable BM */ pci_clear_master(adev->pdev); @@ -682,67 +884,30 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) /* enable BM */ pci_set_master(adev->pdev); adev->has_hw_reset = true; - return 0; + r = 0; + break; } udelay(1); } - return -EINVAL; -} - -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) -{ - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } + amdgpu_atombios_scratch_regs_engine_hung(adev, false); - return pp_funcs->get_asic_baco_capability(pp_handle, cap); + return r; } -int smu7_asic_baco_reset(struct amdgpu_device *adev) +static bool vi_asic_supports_baco(struct amdgpu_device *adev) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; - - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; - - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; - - dev_info(adev->dev, "GPU BACO reset\n"); - - return 0; -} - -/** - * vi_asic_pci_config_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * - * Use PCI Config method to reset the GPU. - * - * Returns 0 for success. - */ -static int vi_asic_pci_config_reset(struct amdgpu_device *adev) -{ - int r; - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = vi_gpu_pci_config_reset(adev); - - amdgpu_atombios_scratch_regs_engine_hung(adev, false); - - return r; + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + return amdgpu_dpm_is_baco_supported(adev); + default: + return false; + } } static enum amd_reset_method @@ -750,6 +915,14 @@ vi_asic_reset_method(struct amdgpu_device *adev) { bool baco_reset; + if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY || + amdgpu_reset_method == AMD_RESET_METHOD_BACO) + return amdgpu_reset_method; + + if (amdgpu_reset_method != -1) + dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", + amdgpu_reset_method); + switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: @@ -757,7 +930,7 @@ vi_asic_reset_method(struct amdgpu_device *adev) case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_TOPAZ: - smu7_asic_get_baco_capability(adev, &baco_reset); + baco_reset = amdgpu_dpm_is_baco_supported(adev); break; default: baco_reset = false; @@ -784,10 +957,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) int r; if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - if (!adev->in_suspend) - amdgpu_inc_vram_lost(adev); - r = smu7_asic_baco_reset(adev); + dev_info(adev->dev, "BACO reset\n"); + r = amdgpu_dpm_baco_reset(adev); } else { + dev_info(adev->dev, "PCI CONFIG reset\n"); r = vi_asic_pci_config_reset(adev); } @@ -942,13 +1115,178 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) /* todo */ } +static void vi_enable_aspm(struct amdgpu_device *adev) +{ + u32 data, orig; + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); + data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT << + 
PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; + data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT << + PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); +} + static void vi_program_aspm(struct amdgpu_device *adev) { + u32 data, data1, orig; + bool bL1SS = false; + bool bClkReqSupport = true; - if (amdgpu_aspm == 0) + if (!amdgpu_aspm) return; - /* todo */ + if (adev->flags & AMD_IS_APU || + adev->asic_type < CHIP_POLARIS10) + return; + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); + data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK; + data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT; + data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL3, data); + + orig = data = RREG32_PCIE(ixPCIE_P_CNTL); + data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_P_CNTL, data); + + data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE); + pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1); + if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK && + (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK | + PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK | + PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK | + PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) { + bL1SS = true; + } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK | + PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK | + PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK | + PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) { + bL1SS = true; + } + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL6, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL); + data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data); + + pci_read_config_dword(adev->pdev, LINK_CAP, &data); + if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK)) + bClkReqSupport = false; + + if (bClkReqSupport) { + orig = data = RREG32_SMC(ixTHM_CLK_CNTL); + data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK); + data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | + (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixTHM_CLK_CNTL, data); + + orig = data = RREG32_SMC(ixMISC_CLK_CTRL); + data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK | + MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK); + data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) | + (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT); + data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixMISC_CLK_CTRL, data); + + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL); + data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK; + if (orig != data) + WREG32_SMC(ixCG_CLKPIN_CNTL, data); + + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2); + data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK; + if (orig != data) + WREG32_SMC(ixCG_CLKPIN_CNTL_2,
data); + + orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL); + data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK; + data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT); + if (orig != data) + WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data); + + orig = data = RREG32_PCIE(ixCPM_CONTROL); + data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK | + CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK); + if (orig != data) + WREG32_PCIE(ixCPM_CONTROL, data); + + orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL); + data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK; + data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT); + if (orig != data) + WREG32_PCIE(ixPCIE_CONFIG_CNTL, data); + + orig = data = RREG32(mmBIF_CLK_CTRL); + data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK; + if (orig != data) + WREG32(mmBIF_CLK_CTRL, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL7, data); + + orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG); + data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_HW_DEBUG, data); + + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2); + data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; + if (bL1SS) + data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL2, data); + + } + + vi_enable_aspm(adev); + + data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); + data1 = RREG32_PCIE(ixPCIE_LC_STATUS1); + if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) && + data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK && + data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) { + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_CNTL, data); + } + + if ((adev->asic_type == CHIP_POLARIS12 && + !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) || + ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) { + orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL); + data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK; + if (orig != data) + WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data); + } } static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, @@ -1100,6 +1438,10 @@ static bool vi_need_reset_on_init(struct amdgpu_device *adev) return false; } +static void vi_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, @@ -1119,6 +1461,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_pcie_usage = &vi_get_pcie_usage, .need_reset_on_init = &vi_need_reset_on_init, .get_pcie_replay_count = &vi_get_pcie_replay_count, + .supports_baco = &vi_asic_supports_baco, + .pre_asic_init = &vi_pre_asic_init, + .query_video_codecs = &vi_query_video_codecs, }; #define CZ_REV_BRISTOL(rev) \ @@ -1540,8 +1885,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_MC, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) { @@ -1559,8 +1903,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_SDMA, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + 
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) { @@ -1578,8 +1921,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_HDP, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } @@ -1593,8 +1935,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_BIF, PP_STATE_SUPPORT_LS, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) { if (state == AMD_CG_STATE_UNGATE) @@ -1606,8 +1947,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_BIF, PP_STATE_SUPPORT_CG, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) { @@ -1621,8 +1961,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_DRM, PP_STATE_SUPPORT_LS, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) { @@ -1636,8 +1975,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_ROM, PP_STATE_SUPPORT_CG, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } return 0; } @@ -1678,6 +2016,7 @@ static int vi_common_set_clockgating_state(void *handle, case CHIP_POLARIS12: case CHIP_VEGAM: vi_common_set_clockgating_state_by_smu(adev, state); + break; default: break; } @@ -1746,14 +2085,13 @@ static const struct amdgpu_ip_block_version vi_common_ip_block = .funcs = &vi_common_ip_funcs, }; -int vi_set_ip_blocks(struct amdgpu_device *adev) +void vi_set_virt_ops(struct amdgpu_device *adev) { - /* in early init stage, vbios code won't work */ - vi_detect_hw_virtualization(adev); - - if (amdgpu_sriov_vf(adev)) - adev->virt.ops = &xgpu_vi_virt_ops; + adev->virt.ops = &xgpu_vi_virt_ops; +} +int vi_set_ip_blocks(struct amdgpu_device *adev) +{ switch (adev->asic_type) { case CHIP_TOPAZ: /* topaz has no DCE, UVD, VCE */ @@ -1764,7 +2102,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); break; case CHIP_FIJI: amdgpu_device_ip_block_add(adev, &vi_common_ip_block); @@ -1774,7 +2112,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -1794,7 +2132,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); 
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -1817,7 +2155,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -1835,7 +2173,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); @@ -1856,7 +2194,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) amdgpu_device_ip_block_add(adev, &dm_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 40d4174913a4..9718f98f8533 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -28,10 +28,9 @@ void vi_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); +void vi_set_virt_ops(struct amdgpu_device *adev); int vi_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); -int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); -int smu7_asic_baco_reset(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 19ddd2312e00..80ce42aacc0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -67,8 +67,6 @@ #define HPD4_REGISTER_OFFSET (0x18b8 - 0x1898) #define HPD5_REGISTER_OFFSET (0x18c0 - 0x1898) -#define AMDGPU_NUM_OF_VMIDS 8 - #define PIPEID(x) ((x) << 0) #define MEID(x) ((x) << 2) #define VMID(x) ((x) << 4) @@ -332,7 +330,7 @@ # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) -#define PACKET3_AQUIRE_MEM 0x58 +#define PACKET3_ACQUIRE_MEM 0x58 #define PACKET3_REWIND 0x59 #define PACKET3_LOAD_UCONFIG_REG 0x5E #define PACKET3_LOAD_SH_REG 0x5F
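Nearly every hunk above funnels register updates through the REG_GET_FIELD()/REG_SET_FIELD() read-modify-write pattern. The sketch below is a simplified standalone model of that convention; the two field macros and their mask/shift values are reconstructions for illustration, not copies of the generated asic_reg headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, following the <REG>__<FIELD>_MASK /
 * <REG>__<FIELD>__SHIFT naming convention of the register headers. */
#define IH_RB_CNTL__RB_ENABLE_MASK   0x00000001UL
#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
#define IH_RB_CNTL__RB_SIZE_MASK     0x0000003EUL
#define IH_RB_CNTL__RB_SIZE__SHIFT   0x1

/* Same shape as the kernel helpers: clear the field, then OR in the
 * shifted value, so unrelated bits of the register are preserved. */
#define REG_SET_FIELD(orig, reg, field, val) \
	(((orig) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
#define REG_GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	uint32_t tmp = 0;

	/* e.g. a 4 KiB ring: the size field holds log2 of the dword count */
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_SIZE, 10);
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, 1);

	printf("IH_RB_CNTL = 0x%08X, RB_SIZE = %u\n",
	       tmp, (unsigned int)REG_GET_FIELD(tmp, IH_RB_CNTL, RB_SIZE));
	return 0;
}

Masking the shifted value before the OR means an out-of-range input can silently corrupt neighbouring bits rather than fault, which is why the driver code only ever feeds these macros values that fit the field.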