Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
44 files changed, 1266 insertions, 195 deletions
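The new ISP code is gated behind a fresh Kconfig symbol, so a build only picks it up when opted in. A minimal, illustrative .config fragment (symbol names taken from the Kconfig hunk below; the =m/=y split is an assumption about the builder's setup):

CONFIG_DRM_AMDGPU=m
# New symbol added by this patch; MFD_CORE and PM_GENERIC_DOMAINS
# are pulled in automatically via 'select'.
CONFIG_DRM_AMD_ISP=y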
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 4232ab27f990..0051fb1b437f 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -71,6 +71,17 @@ config DRM_AMDGPU_USERPTR This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it isn't already selected to enable full userptr support. +config DRM_AMD_ISP + bool "Enable AMD Image Signal Processor IP support" + depends on DRM_AMDGPU + select MFD_CORE + select PM_GENERIC_DOMAINS if PM + help + Choose this option to enable ISP IP support for AMD SOCs. + This adds the ISP (Image Signal Processor) IP driver and wires + it up into the amdgpu driver. It is required to support + MIPI cameras on APUs. + config DRM_AMDGPU_WERROR bool "Force the compiler to throw an error instead of a warning when compiling" depends on DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index dfd2d594e143..9dd8294032ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -323,4 +323,12 @@ amdgpu-y += $(AMD_DISPLAY_FILES) endif +# add isp block +ifneq ($(CONFIG_DRM_AMD_ISP),) +amdgpu-y += \ + amdgpu_isp.o \ + isp_v4_1_0.o \ + isp_v4_1_1.o +endif + obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7dab4768cee6..137a88b8de45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -112,6 +112,9 @@ #include "amdgpu_xcp.h" #include "amdgpu_seq64.h" #include "amdgpu_reg_state.h" +#if defined(CONFIG_DRM_AMD_ISP) +#include "amdgpu_isp.h" +#endif #define MAX_GPU_INSTANCE 64 @@ -221,7 +224,6 @@ extern int amdgpu_mes; extern int amdgpu_mes_log_enable; extern int amdgpu_mes_kiq; extern int amdgpu_uni_mes; -extern int amdgpu_jpeg_test; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; @@ -721,6 +723,7 @@ enum amd_hw_ip_block_type { XGMI_HWIP, DCI_HWIP, PCIE_HWIP, + ISP_HWIP, MAX_HWIP }; @@ -1018,7 +1021,6 @@ struct amdgpu_device { /* jpeg */ struct amdgpu_jpeg jpeg; - bool enable_jpeg_test; /* vpe */ struct amdgpu_vpe vpe; @@ -1048,6 +1050,11 @@ struct amdgpu_device { /* display related functionality */ struct amdgpu_display_manager dm; +#if defined(CONFIG_DRM_AMD_ISP) + /* isp */ + struct amdgpu_isp isp; +#endif + /* mes */ bool enable_mes; bool enable_mes_kiq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index f932bec6e534..f873dd3cae16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -433,7 +433,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, mem_channel_number = vram_info->v30.channel_num; mem_channel_width = vram_info->v30.channel_width; if (vram_width) - *vram_width = mem_channel_number * (1 << mem_channel_width); + *vram_width = mem_channel_number * 16; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 183e219b6a85..b27336a05aae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5069,7 +5069,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, struct amdgpu_hive_info *hive = NULL; if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) { - amdgpu_virt_ready_to_reset(adev); + if (!amdgpu_ras_get_fed_status(adev)) +
amdgpu_virt_ready_to_reset(adev); amdgpu_virt_wait_reset(adev); clear_bit(AMDGPU_HOST_FLR, &reset_context->flags); r = amdgpu_virt_request_full_gpu(adev, true); @@ -5837,6 +5838,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ /* Actual ASIC resets if needed.*/ /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { + if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) { + dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n"); + amdgpu_ras_set_fed(adev, true); + set_bit(AMDGPU_HOST_FLR, &reset_context->flags); + } + r = amdgpu_device_reset_sriov(adev, reset_context); if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) { amdgpu_virt_release_full_gpu(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 0cb8aea93a70..90475ddf1c03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -106,6 +106,9 @@ #include "jpeg_v5_0_0.h" #include "amdgpu_vpe.h" +#if defined(CONFIG_DRM_AMD_ISP) +#include "amdgpu_isp.h" +#endif #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); @@ -225,6 +228,7 @@ static int hw_id_map[MAX_HWIP] = { [DCI_HWIP] = DCI_HWID, [PCIE_HWIP] = PCIE_HWID, [VPE_HWIP] = VPE_HWID, + [ISP_HWIP] = ISP_HWID, }; static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary) @@ -711,6 +715,12 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, adev->sdma.sdma_mask &= ~(1U << harvest_info->list[i].number_instance); break; +#if defined(CONFIG_DRM_AMD_ISP) + case ISP_HWID: + adev->isp.harvest_config |= + ~(1U << harvest_info->list[i].number_instance); + break; +#endif default: break; } @@ -2294,8 +2304,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(5, 0, 0): amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block); - if (amdgpu_jpeg_test) - adev->enable_jpeg_test = true; break; default: dev_err(adev->dev, @@ -2378,6 +2386,24 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) return 0; } +static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DRM_AMD_ISP) + switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { + case IP_VERSION(4, 1, 0): + amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block); + break; + case IP_VERSION(4, 1, 1): + amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block); + break; + default: + break; + } +#endif + + return 0; +} + int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) { int r; @@ -2904,6 +2930,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; + r = amdgpu_discovery_set_isp_ip_blocks(adev); + if (r) + return r; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9f2db858c6e0..78089f2f79f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -204,7 +204,6 @@ int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ -int amdgpu_jpeg_test; int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; int amdgpu_use_xgmi_p2p = 1; @@ -940,9 +939,6 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); MODULE_PARM_DESC(reset_method, "GPU 
reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)"); module_param_named(reset_method, amdgpu_reset_method, int, 0644); -MODULE_PARM_DESC(jpeg_test, "jpeg test(0 = disable (default), 1 = enable)"); -module_param_named(jpeg_test, amdgpu_jpeg_test, int, 0444); - /** * DOC: bad_page_threshold (int) Bad page threshold specifies the * threshold value of faulty pages detected by RAS ECC, which may diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 46889bfe5669..aad2027e5c7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -343,11 +343,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) return -EINVAL; - if ((flags & AMDGPU_GEM_CREATE_GFX12_DCC) && - ((amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) || - !(args->in.domains & AMDGPU_GEM_DOMAIN_VRAM))) return -EINVAL; - if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) { DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n"); return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 322b8ff67cde..3a7622611916 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -718,7 +718,11 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, ndw += kiq->pmf->invalidate_tlbs_size; spin_lock(&adev->gfx.kiq[inst].ring_lock); - amdgpu_ring_alloc(ring, ndw); + r = amdgpu_ring_alloc(ring, ndw); + if (r) { + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + goto error_unlock_reset; + } if (adev->gmc.flush_tlb_needs_extra_type_2) kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 013ff373e067..19ce4da285e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -466,7 +466,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); - } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) && + } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) || + (client_id == SOC15_IH_CLIENTID_ISP)) && adev->irq.virq[src_id]) { generic_handle_domain_irq(adev->irq.domain, src_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c new file mode 100644 index 000000000000..4766e99dd98f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ + +#include <linux/firmware.h> +#include <linux/mfd/core.h> + +#include "amdgpu.h" +#include "amdgpu_isp.h" +#include "isp_v4_1_0.h" +#include "isp_v4_1_1.h" + +static int isp_sw_init(void *handle) +{ + return 0; +} + +static int isp_sw_fini(void *handle) +{ + return 0; +} + +/** + * isp_hw_init - start and test isp block + * + * @handle: handle for amdgpu_device pointer + * + */ +static int isp_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_isp *isp = &adev->isp; + + const struct amdgpu_ip_block *ip_block = + amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP); + + if (!ip_block) + return -EINVAL; + + if (isp->funcs->hw_init != NULL) + return isp->funcs->hw_init(isp); + + return -ENODEV; +} + +/** + * isp_hw_fini - stop the hardware block + * + * @handle: handle for amdgpu_device pointer + * + */ +static int isp_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_isp *isp = &adev->isp; + + if (isp->funcs->hw_fini != NULL) + return isp->funcs->hw_fini(isp); + + return -ENODEV; +} + +static int isp_suspend(void *handle) +{ + return 0; +} + +static int isp_resume(void *handle) +{ + return 0; +} + +static int isp_load_fw_by_psp(struct amdgpu_device *adev) +{ + const struct common_firmware_header *hdr; + char ucode_prefix[10]; + int r = 0; + + /* get isp fw binary name and path */ + amdgpu_ucode_ip_version_decode(adev, ISP_HWIP, ucode_prefix, + sizeof(ucode_prefix)); + + /* read isp fw */ + r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix); + if (r) { + amdgpu_ucode_release(&adev->isp.fw); + return r; + } + + hdr = (const struct common_firmware_header *)adev->isp.fw->data; + + adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].ucode_id = + AMDGPU_UCODE_ID_ISP; + adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].fw = adev->isp.fw; + + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + + return r; +} + +static int isp_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_isp *isp = &adev->isp; + + switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) { + case IP_VERSION(4, 1, 0): + isp_v4_1_0_set_isp_funcs(isp); + break; + case IP_VERSION(4, 1, 1): + isp_v4_1_1_set_isp_funcs(isp); + break; + default: + return -EINVAL; + } + + isp->adev = adev; + isp->parent = adev->dev; + + if (isp_load_fw_by_psp(adev)) { + DRM_DEBUG_DRIVER("%s: isp fw load failed\n", __func__); + return -ENOENT; + } + + return 0; +} + +static bool isp_is_idle(void *handle) +{ + return true; +} + +static int isp_wait_for_idle(void *handle) +{ + return 0; +} + +static int isp_soft_reset(void *handle) +{ + return 0; +} + +static int isp_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int isp_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static const struct amd_ip_funcs isp_ip_funcs = { + .name = "isp_ip", + .early_init = isp_early_init, + .late_init = NULL, + 
.sw_init = isp_sw_init, + .sw_fini = isp_sw_fini, + .hw_init = isp_hw_init, + .hw_fini = isp_hw_fini, + .suspend = isp_suspend, + .resume = isp_resume, + .is_idle = isp_is_idle, + .wait_for_idle = isp_wait_for_idle, + .soft_reset = isp_soft_reset, + .set_clockgating_state = isp_set_clockgating_state, + .set_powergating_state = isp_set_powergating_state, +}; + +const struct amdgpu_ip_block_version isp_v4_1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_ISP, + .major = 4, + .minor = 1, + .rev = 0, + .funcs = &isp_ip_funcs, +}; + +const struct amdgpu_ip_block_version isp_v4_1_1_ip_block = { + .type = AMD_IP_BLOCK_TYPE_ISP, + .major = 4, + .minor = 1, + .rev = 1, + .funcs = &isp_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h new file mode 100644 index 000000000000..44e2ea8c9728 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ + +#ifndef __AMDGPU_ISP_H__ +#define __AMDGPU_ISP_H__ + +#define ISP_REGS_OFFSET_END 0x629A4 + +struct amdgpu_isp; + +struct isp_platform_data { + void *adev; + u32 asic_type; + resource_size_t base_rmmio_size; +}; + +struct isp_funcs { + int (*hw_init)(struct amdgpu_isp *isp); + int (*hw_fini)(struct amdgpu_isp *isp); +}; + +struct amdgpu_isp { + struct device *parent; + struct amdgpu_device *adev; + const struct isp_funcs *funcs; + struct mfd_cell *isp_cell; + struct resource *isp_res; + struct isp_platform_data *isp_pdata; + unsigned int harvest_config; + const struct firmware *fw; +}; + +extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block; +extern const struct amdgpu_ip_block_version isp_v4_1_1_ip_block; + +#endif /* __AMDGPU_ISP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index aea31d61d991..f9cdd873ac9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -60,6 +60,37 @@ RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \ }) +#define WREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \ + do { \ + WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \ + regUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \ + regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \ + WREG32_SOC15( \ + JPEG, GET_INST(JPEG, inst_idx), \ + regUVD_DPG_LMA_CTL, \ + (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \ + indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ + } while (0) + +#define RREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, mask_en) \ + do { \ + WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \ + regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \ + WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \ + regUVD_DPG_LMA_CTL, \ + (UVD_DPG_LMA_CTL__MASK_EN_MASK | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + RREG32_SOC15(JPEG, inst_idx, regUVD_DPG_LMA_DATA); \ + } while (0) + +#define ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, offset, value, indirect) \ + do { \ + *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset; \ + *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \ + } while (0) + struct amdgpu_jpeg_reg{ unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a1b7438c43dc..e32161f6b67a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1599,36 +1599,39 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) u64 size; if (dma_resv_trylock(bo->tbo.base.resv)) { - - switch (bo->tbo.resource->mem_type) { - case TTM_PL_VRAM: - if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) - placement = "VRAM VISIBLE"; - else - placement = "VRAM"; - break; - case TTM_PL_TT: - placement = "GTT"; - break; - case AMDGPU_PL_GDS: - placement = "GDS"; - break; - case AMDGPU_PL_GWS: - placement = "GWS"; - break; - case AMDGPU_PL_OA: - placement = "OA"; - break; - case AMDGPU_PL_PREEMPT: - placement = "PREEMPTIBLE"; - break; - case AMDGPU_PL_DOORBELL: - placement = "DOORBELL"; - break; - case TTM_PL_SYSTEM: - default: - placement = "CPU"; - break; + if (!bo->tbo.resource) { + placement = "NONE"; + } else { + switch (bo->tbo.resource->mem_type) { + case TTM_PL_VRAM: + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) + placement = "VRAM VISIBLE"; + else + placement = "VRAM"; + break; + case TTM_PL_TT: + placement = "GTT"; + break; + case AMDGPU_PL_GDS: + placement = 
"GDS"; + break; + case AMDGPU_PL_GWS: + placement = "GWS"; + break; + case AMDGPU_PL_OA: + placement = "OA"; + break; + case AMDGPU_PL_PREEMPT: + placement = "PREEMPTIBLE"; + break; + case AMDGPU_PL_DOORBELL: + placement = "DOORBELL"; + break; + case TTM_PL_SYSTEM: + default: + placement = "CPU"; + break; + } } dma_resv_unlock(bo->tbo.base.resv); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index f89de056a828..e15814d9ca17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2559,6 +2559,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_JPEG_RAM: *type = GFX_FW_TYPE_JPEG_RAM; break; + case AMDGPU_UCODE_ID_ISP: + *type = GFX_FW_TYPE_ISP; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 68e9935028db..4edd8e333d36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -120,7 +120,7 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) -#define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms +#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms @@ -1384,10 +1384,17 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i memset(&qctx, 0, sizeof(qctx)); qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ? RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID); + + if (!down_read_trylock(&adev->reset_domain->sem)) { + ret = -EIO; + goto out_fini_err_data; + } + ret = amdgpu_ras_query_error_status_helper(adev, info, &err_data, &qctx, error_query_mode); + up_read(&adev->reset_domain->sem); if (ret) goto out_fini_err_data; @@ -2105,10 +2112,8 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) { struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev); - amdgpu_ras_put_poison_req(obj->adev, - AMDGPU_RAS_BLOCK__UMC, 0, NULL, NULL, false); - atomic_inc(&con->page_retirement_req_cnt); + atomic_inc(&con->poison_creation_count); wake_up(&con->page_retirement_wq); } @@ -2799,7 +2804,8 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log) memset(&ecc_log->ecc_key, 0xad, sizeof(ecc_log->ecc_key)); INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL); - ecc_log->de_updated = false; + ecc_log->de_queried_count = 0; + ecc_log->prev_de_queried_count = 0; } static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) @@ -2818,7 +2824,8 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log) mutex_unlock(&ecc_log->lock); mutex_destroy(&ecc_log->lock); - ecc_log->de_updated = false; + ecc_log->de_queried_count = 0; + ecc_log->prev_de_queried_count = 0; } static void amdgpu_ras_do_page_retirement(struct work_struct *work) @@ -2850,60 +2857,116 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work) mutex_unlock(&con->umc_ecc_log.lock); } -static void amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, - uint32_t timeout_ms) +static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, + uint32_t poison_creation_count) { int ret = 0; struct ras_ecc_log_info *ecc_log; struct ras_query_if info; - uint32_t timeout = 
timeout_ms; + uint32_t timeout = 0; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + uint64_t de_queried_count; + uint32_t new_detect_count, total_detect_count; + uint32_t need_query_count = poison_creation_count; + bool query_data_timeout = false; memset(&info, 0, sizeof(info)); info.head.block = AMDGPU_RAS_BLOCK__UMC; ecc_log = &ras->umc_ecc_log; - ecc_log->de_updated = false; + total_detect_count = 0; do { ret = amdgpu_ras_query_error_status(adev, &info); - if (ret) { - dev_err(adev->dev, "Failed to query ras error! ret:%d\n", ret); - return; + if (ret) + return ret; + + de_queried_count = ecc_log->de_queried_count; + if (de_queried_count > ecc_log->prev_de_queried_count) { + new_detect_count = de_queried_count - ecc_log->prev_de_queried_count; + ecc_log->prev_de_queried_count = de_queried_count; + timeout = 0; + } else { + new_detect_count = 0; } - if (timeout && !ecc_log->de_updated) { - msleep(1); - timeout--; + if (new_detect_count) { + total_detect_count += new_detect_count; + } else { + if (!timeout && need_query_count) + timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; + + if (timeout) { + if (!--timeout) { + query_data_timeout = true; + break; + } + msleep(1); + } } - } while (timeout && !ecc_log->de_updated); + } while (total_detect_count < need_query_count); - if (timeout_ms && !timeout) { - dev_warn(adev->dev, "Can't find deferred error\n"); - return; + if (query_data_timeout) { + dev_warn(adev->dev, "Can't find deferred error! count: %u\n", + (need_query_count - total_detect_count)); + return -ENOENT; } - if (!ret) + if (total_detect_count) schedule_delayed_work(&ras->page_retirement_dwork, 0); + + return 0; +} + +static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_poison_msg msg; + int ret; + + do { + ret = kfifo_get(&con->poison_fifo, &msg); + } while (ret); } static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev, - struct ras_poison_msg *poison_msg) + uint32_t msg_count, uint32_t *gpu_reset) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - uint32_t reset = poison_msg->reset; - uint16_t pasid = poison_msg->pasid; + uint32_t reset_flags = 0, reset = 0; + struct ras_poison_msg msg; + int ret, i; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (poison_msg->pasid_fn) - poison_msg->pasid_fn(adev, pasid, poison_msg->data); + for (i = 0; i < msg_count; i++) { + ret = amdgpu_ras_get_poison_req(adev, &msg); + if (!ret) + continue; + + if (msg.pasid_fn) + msg.pasid_fn(adev, msg.pasid, msg.data); + + reset_flags |= msg.reset; + } /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */ - if (reset && !con->is_rma) { + if (reset_flags && !con->is_rma) { + if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + else + reset = reset_flags; + flush_delayed_work(&con->page_retirement_dwork); con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); + + *gpu_reset = reset; + + /* Wait for gpu recovery to complete */ + flush_work(&con->recovery_work); } return 0; @@ -2913,9 +2976,9 @@ static int amdgpu_ras_page_retirement_thread(void *param) { struct amdgpu_device *adev = (struct amdgpu_device *)param; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_poison_msg poison_msg; - enum amdgpu_ras_block ras_block; - bool poison_creation_is_handled = false; + uint32_t poison_creation_count, 
msg_count; + uint32_t gpu_reset; + int ret; while (!kthread_should_stop()) { @@ -2926,33 +2989,61 @@ static int amdgpu_ras_page_retirement_thread(void *param) if (kthread_should_stop()) break; - atomic_dec(&con->page_retirement_req_cnt); + gpu_reset = 0; - if (!amdgpu_ras_get_poison_req(adev, &poison_msg)) - continue; + do { + poison_creation_count = atomic_read(&con->poison_creation_count); + ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count); + if (ret == -EIO) + break; - ras_block = poison_msg.block; + if (poison_creation_count) { + atomic_sub(poison_creation_count, &con->poison_creation_count); + atomic_sub(poison_creation_count, &con->page_retirement_req_cnt); + } + } while (atomic_read(&con->poison_creation_count)); + + if (ret != -EIO) { + msg_count = kfifo_len(&con->poison_fifo); + if (msg_count) { + ret = amdgpu_ras_poison_consumption_handler(adev, + msg_count, &gpu_reset); + if ((ret != -EIO) && + (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET)) + atomic_sub(msg_count, &con->page_retirement_req_cnt); + } + } - dev_dbg(adev->dev, "Start processing ras block %s(%d)\n", - ras_block_str(ras_block), ras_block); + if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) { + /* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */ + /* Clear poison creation request */ + atomic_set(&con->poison_creation_count, 0); - if (ras_block == AMDGPU_RAS_BLOCK__UMC) { - amdgpu_ras_poison_creation_handler(adev, - MAX_UMC_POISON_POLLING_TIME_ASYNC); - poison_creation_is_handled = true; - } else { - /* poison_creation_is_handled: - * false: no poison creation interrupt, but it has poison - * consumption interrupt. - * true: It has poison creation interrupt at the beginning, - * but it has no poison creation interrupt later. - */ - amdgpu_ras_poison_creation_handler(adev, - poison_creation_is_handled ? 
- 0 : MAX_UMC_POISON_POLLING_TIME_ASYNC); + /* Clear poison fifo */ + amdgpu_ras_clear_poison_fifo(adev); + + /* Clear all poison requests */ + atomic_set(&con->page_retirement_req_cnt, 0); + + if (ret == -EIO) { + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + up_read(&adev->reset_domain->sem); + } + + /* Wake up work to save bad pages to eeprom */ + schedule_delayed_work(&con->page_retirement_dwork, 0); + } else if (gpu_reset) { + /* gpu just completed mode-2 reset or other reset */ + /* Clear poison consumption messages cached in fifo */ + msg_count = kfifo_len(&con->poison_fifo); + if (msg_count) { + amdgpu_ras_clear_poison_fifo(adev); + atomic_sub(msg_count, &con->page_retirement_req_cnt); + } - amdgpu_ras_poison_consumption_handler(adev, &poison_msg); - poison_creation_is_handled = false; + /* Wake up work to save bad pages to eeprom */ + schedule_delayed_work(&con->page_retirement_dwork, 0); } } @@ -3026,6 +3117,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) mutex_init(&con->page_retirement_lock); init_waitqueue_head(&con->page_retirement_wq); atomic_set(&con->page_retirement_req_cnt, 0); + atomic_set(&con->poison_creation_count, 0); con->page_retirement_thread = kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement"); if (IS_ERR(con->page_retirement_thread)) { @@ -3074,6 +3166,7 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) kthread_stop(con->page_retirement_thread); atomic_set(&con->page_retirement_req_cnt, 0); + atomic_set(&con->poison_creation_count, 0); mutex_destroy(&con->page_rsv_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 83437fef9df5..0fa1148e6642 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -469,7 +469,8 @@ struct ras_ecc_log_info { struct mutex lock; siphash_key_t ecc_key; struct radix_tree_root de_page_tree; - bool de_updated; + uint64_t de_queried_count; + uint64_t prev_de_queried_count; }; struct amdgpu_ras { @@ -531,6 +532,7 @@ struct amdgpu_ras { wait_queue_head_t page_retirement_wq; struct mutex page_retirement_lock; atomic_t page_retirement_req_cnt; + atomic_t poison_creation_count; struct mutex page_rsv_lock; DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128); struct ras_ecc_log_info umc_ecc_log; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3588f1c5a007..4c7b53648a50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -714,6 +714,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) return "RS64_MEC_P2_STACK"; case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: return "RS64_MEC_P3_STACK"; + case AMDGPU_UCODE_ID_ISP: + return "ISP"; default: return "UNKNOWN UCODE"; } @@ -1413,6 +1415,9 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, case VPE_HWIP: ip_name = "vpe"; break; + case ISP_HWIP: + ip_name = "isp"; + break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index f4e5285c4dd6..5bc37acd3981 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -523,6 +523,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER, AMDGPU_UCODE_ID_P2S_TABLE, AMDGPU_UCODE_ID_JPEG_RAM, + AMDGPU_UCODE_ID_ISP, AMDGPU_UCODE_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 20e0e522fb51..2f84bdb8c594 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -293,14 +293,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev, amdgpu_ras_error_data_fini(&err_data); } else { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - - amdgpu_ras_put_poison_req(adev, - block, pasid, pasid_fn, data, reset); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + ret = amdgpu_ras_put_poison_req(adev, + block, pasid, pasid_fn, data, reset); + if (!ret) { atomic_inc(&con->page_retirement_req_cnt); - wake_up(&con->page_retirement_wq); + } } } else { if (adev->virt.ops && adev->virt.ops->ras_poison_handler) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 63f2286858c4..ccb3d041c2b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -229,6 +229,22 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev) adev->virt.mm_table.gpu_addr = 0; } +/** + * amdgpu_virt_rcvd_ras_interrupt() - receive ras interrupt + * @adev: amdgpu device. + * Check whether host sent RAS error message + * Return: true if found, otherwise false + */ +bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + + if (!virt->ops || !virt->ops->rcvd_ras_intr) + return false; + + return virt->ops->rcvd_ras_intr(adev); +} + unsigned int amd_sriov_msg_checksum(void *obj, unsigned long obj_size, @@ -612,11 +628,14 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) ret = amdgpu_virt_read_pf2vf_data(adev); if (ret) { adev->virt.vf2pf_update_retry_cnt++; - if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && - amdgpu_sriov_runtime(adev)) { + + if ((amdgpu_virt_rcvd_ras_interrupt(adev) || + adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && + amdgpu_sriov_runtime(adev)) { + amdgpu_ras_set_fed(adev, true); if (amdgpu_reset_domain_schedule(adev->reset_domain, - &adev->kfd.reset_work)) + &adev->kfd.reset_work)) return; else dev_err(adev->dev, "Failed to queue work! 
at %s", __func__); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f04cd1586c72..b42a8854dca0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -52,7 +52,7 @@ /* tonga/fiji use this offset */ #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 -#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5 +#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2 enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, @@ -94,6 +94,7 @@ struct amdgpu_virt_ops { u32 data1, u32 data2, u32 data3); void (*ras_poison_handler)(struct amdgpu_device *adev, enum amdgpu_ras_block block); + bool (*rcvd_ras_intr)(struct amdgpu_device *adev); }; /* @@ -352,6 +353,7 @@ void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev); int amdgpu_virt_wait_reset(struct amdgpu_device *adev); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); +bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_exchange_data(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 2a510351dfce..5c17409439f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -611,10 +611,9 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char * { const struct psp_firmware_header_v1_0 *toc_hdr; int err = 0; - char fw_name[40]; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, + "amdgpu/%s_toc.bin", ucode_prefix); if (err) goto out; @@ -653,7 +652,6 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { - char fw_name[40]; char ucode_prefix[25]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; @@ -663,9 +661,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, + "amdgpu/%s_pfp.bin", ucode_prefix); if (err) goto out; /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ @@ -681,8 +678,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP); } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, + "amdgpu/%s_me.bin", ucode_prefix); if (err) goto out; if (adev->gfx.rs64_enable) { @@ -696,10 +693,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) && adev->pdev->revision == 0xCE) - snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin"); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + "amdgpu/gc_11_0_0_rlc_1.bin"); else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, 
&adev->gfx.rlc_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -710,8 +708,8 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) goto out; } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + "amdgpu/%s_mec.bin", ucode_prefix); if (err) goto out; if (adev->gfx.rs64_enable) { @@ -4500,11 +4498,11 @@ static int gfx_v11_0_hw_init(void *handle) /* RLC autoload sequence 1: Program rlc ram */ if (adev->gfx.imu.funcs->program_rlc_ram) adev->gfx.imu.funcs->program_rlc_ram(adev); + /* rlc autoload firmware */ + r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); + if (r) + return r; } - /* rlc autoload firmware */ - r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); - if (r) - return r; } else { if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 460bf33a22b1..e9559bdd8264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -386,10 +386,9 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char * { const struct psp_firmware_header_v1_0 *toc_hdr; int err = 0; - char fw_name[40]; - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, + "amdgpu/%s_toc.bin", ucode_prefix); if (err) goto out; @@ -407,7 +406,6 @@ out: static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) { - char fw_name[40]; char ucode_prefix[15]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; @@ -418,23 +416,23 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, + "amdgpu/%s_pfp.bin", ucode_prefix); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, + "amdgpu/%s_me.bin", ucode_prefix); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME); amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); if (!amdgpu_sriov_vf(adev)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -445,8 +443,8 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) goto out; } - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); - err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, + 
"amdgpu/%s_mec.bin", ucode_prefix); if (err) goto out; amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 8d8763ebe027..1149595a02d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -55,6 +55,14 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin"); #define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */ #define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */ +#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */ +#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */ +#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */ +#define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper Bound */ + +#define NORMALIZE_XCC_REG_OFFSET(offset) \ + (offset & 0xFFFF) + struct amdgpu_gfx_ras gfx_v9_4_3_ras; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); @@ -217,9 +225,24 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) } } +static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg) +{ + uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg); + + /* If it is an XCC reg, normalize the reg to keep + lower 16 bits in local xcc */ + + if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) || + ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH))) + return normalized_reg; + else + return reg; +} + static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, bool wc, uint32_t reg, uint32_t val) { + reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | WRITE_DATA_DST_SEL(0) | @@ -234,6 +257,12 @@ static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, uint32_t addr1, uint32_t ref, uint32_t mask, uint32_t inv) { + /* Only do the normalization on regspace */ + if (mem_space == 0) { + addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0); + addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1); + } + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); amdgpu_ring_write(ring, /* memory (1) or register (0) */ @@ -2725,6 +2754,8 @@ static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, { struct amdgpu_device *adev = ring->adev; + reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ (5 << 8) | /* dst: memory */ @@ -2742,6 +2773,8 @@ static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, { uint32_t cmd = 0; + reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); + switch (ring->funcs->type) { case AMDGPU_RING_TYPE_GFX: cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index e14acab5cceb..72109abe7c86 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -629,9 +629,11 @@ static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev, status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS); fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - /* reset page fault status */ - WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), - regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + if (!amdgpu_sriov_vf(adev)) { + /* clear page fault status 
and address */ + WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), + regVM_L2_PROTECTION_FAULT_CNTL), 1, ~1); + } return fed; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index be78507ec0d8..fd3ac483760e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -464,10 +464,6 @@ static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC); case AMDGPU_VM_MTYPE_NC: return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC); - case AMDGPU_VM_MTYPE_WC: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_WC); - case AMDGPU_VM_MTYPE_CC: - return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_CC); case AMDGPU_VM_MTYPE_UC: return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC); default: diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 88b4644f8e96..b73136d390cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -672,7 +672,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + if (!amdgpu_sriov_vf(adev)) + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 3cb64c8f7175..18a761d6ef33 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -135,6 +135,34 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 0fbf5fa7b0f8..2e0469feca1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -535,6 +535,12 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev) static int ih_v6_1_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = amdgpu_irq_add_domain(adev); + if (ret) { + return ret; + } ih_v6_1_set_interrupt_funcs(adev); ih_v6_1_set_self_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c new file mode 100644 index 000000000000..aac107898bae --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ + +#include "amdgpu.h" +#include "isp_v4_1_0.h" + +static const unsigned int isp_4_1_0_int_srcid[MAX_ISP410_INT_SRC] = { + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT11, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT12, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT13, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT14, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT15, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16 +}; + +static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp) +{ + struct amdgpu_device *adev = isp->adev; + u64 isp_base; + int int_idx; + int r; + + if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) + return -EINVAL; + + isp_base = adev->rmmio_base; + + isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL); + if (!isp->isp_cell) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__); + goto failure; + } + + isp->isp_res = kcalloc(MAX_ISP410_INT_SRC + 1, sizeof(struct resource), + GFP_KERNEL); + if (!isp->isp_res) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd res alloc failed\n", __func__); + goto failure; + } + + isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL); + if (!isp->isp_pdata) { + r = -ENOMEM; + DRM_ERROR("%s: isp platform data alloc failed\n", __func__); + goto failure; + } + + /* initialize isp platform data */ + isp->isp_pdata->adev = (void *)adev; + isp->isp_pdata->asic_type = adev->asic_type; + isp->isp_pdata->base_rmmio_size = adev->rmmio_size; + + isp->isp_res[0].name = "isp_4_1_0_reg"; + isp->isp_res[0].flags = IORESOURCE_MEM; + isp->isp_res[0].start = isp_base; + isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END; + + for (int_idx = 0; int_idx < MAX_ISP410_INT_SRC; int_idx++) { + isp->isp_res[int_idx + 1].name = "isp_4_1_0_irq"; + isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ; + isp->isp_res[int_idx + 1].start = + amdgpu_irq_create_mapping(adev, isp_4_1_0_int_srcid[int_idx]); + isp->isp_res[int_idx + 1].end = + isp->isp_res[int_idx + 1].start; + } + + isp->isp_cell[0].name = "amd_isp_capture"; + isp->isp_cell[0].num_resources = MAX_ISP410_INT_SRC + 1; + isp->isp_cell[0].resources = &isp->isp_res[0]; + isp->isp_cell[0].platform_data = isp->isp_pdata; + isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); + + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1); + if (r) { + DRM_ERROR("%s: add mfd hotplug device failed\n", __func__); + goto failure; + } + + return 0; + +failure: + + kfree(isp->isp_pdata); + kfree(isp->isp_res); + kfree(isp->isp_cell); + + return r; +} + +static int isp_v4_1_0_hw_fini(struct amdgpu_isp *isp) +{ + mfd_remove_devices(isp->parent); + + kfree(isp->isp_res); + kfree(isp->isp_cell); + kfree(isp->isp_pdata); + + return 0; +} + +static const struct isp_funcs isp_v4_1_0_funcs = { + .hw_init = isp_v4_1_0_hw_init, + .hw_fini = isp_v4_1_0_hw_fini, +}; + +void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp) +{ + isp->funcs = &isp_v4_1_0_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h new file mode 100644 index 000000000000..315f2822410c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ + +#ifndef __ISP_V4_1_0_H__ +#define __ISP_V4_1_0_H__ + +#include "amdgpu_isp.h" + +#include "ivsrcid/isp/irqsrcs_isp_4_1.h" + +#define MAX_ISP410_INT_SRC 8 + +void isp_v4_1_0_set_isp_funcs(struct amdgpu_isp *isp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c new file mode 100644 index 000000000000..4e17fa03f7b5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ + +#include "amdgpu.h" +#include "isp_v4_1_1.h" + +static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT11, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT12, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT13, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT14, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT15, + ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16 +}; + +static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) +{ + struct amdgpu_device *adev = isp->adev; + u64 isp_base; + int int_idx; + int r; + + if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) + return -EINVAL; + + isp_base = adev->rmmio_base; + + isp->isp_cell = kcalloc(1, sizeof(struct mfd_cell), GFP_KERNEL); + if (!isp->isp_cell) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd cell alloc failed\n", __func__); + goto failure; + } + + isp->isp_res = kcalloc(MAX_ISP411_INT_SRC + 1, sizeof(struct resource), + GFP_KERNEL); + if (!isp->isp_res) { + r = -ENOMEM; + DRM_ERROR("%s: isp mfd res alloc failed\n", __func__); + goto failure; + } + + isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL); + if (!isp->isp_pdata) { + r = -ENOMEM; + DRM_ERROR("%s: isp platform data alloc failed\n", __func__); + goto failure; + } + + /* initialize isp platform data */ + isp->isp_pdata->adev = (void *)adev; + isp->isp_pdata->asic_type = adev->asic_type; + isp->isp_pdata->base_rmmio_size = adev->rmmio_size; + + isp->isp_res[0].name = "isp_4_1_1_reg"; + isp->isp_res[0].flags = IORESOURCE_MEM; + isp->isp_res[0].start = isp_base; + isp->isp_res[0].end = isp_base + ISP_REGS_OFFSET_END; + + for (int_idx = 0; int_idx < MAX_ISP411_INT_SRC; int_idx++) { + isp->isp_res[int_idx + 1].name = "isp_4_1_1_irq"; + isp->isp_res[int_idx + 1].flags = IORESOURCE_IRQ; + isp->isp_res[int_idx + 1].start = + amdgpu_irq_create_mapping(adev, isp_4_1_1_int_srcid[int_idx]); + isp->isp_res[int_idx + 1].end = + isp->isp_res[int_idx + 1].start; + } + + isp->isp_cell[0].name = "amd_isp_capture"; + isp->isp_cell[0].num_resources = MAX_ISP411_INT_SRC + 1; + isp->isp_cell[0].resources = &isp->isp_res[0]; + isp->isp_cell[0].platform_data = isp->isp_pdata; + isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); + + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 1); + if (r) { + DRM_ERROR("%s: add mfd hotplug device failed\n", __func__); + goto failure; + } + + return 0; + +failure: + + kfree(isp->isp_pdata); + kfree(isp->isp_res); + kfree(isp->isp_cell); + + return r; +} + +static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp) +{ + mfd_remove_devices(isp->parent); + + kfree(isp->isp_res); + kfree(isp->isp_cell); + kfree(isp->isp_pdata); + + return 0; +} + +static const struct isp_funcs isp_v4_1_1_funcs = { + .hw_init = isp_v4_1_1_hw_init, + .hw_fini = isp_v4_1_1_hw_fini, +}; + +void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp) +{ + isp->funcs = &isp_v4_1_1_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h new file mode 100644 index 000000000000..dfb9522c9d6a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ + +#ifndef __ISP_V4_1_1_H__ +#define __ISP_V4_1_1_H__ + +#include "amdgpu_isp.h" + +#include "ivsrcid/isp/irqsrcs_isp_4_1.h" + +#define MAX_ISP411_INT_SRC 8 + +void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 68ef29bc70e2..d694a276498a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -31,6 +31,7 @@ #include "vcn/vcn_5_0_0_offset.h" #include "vcn/vcn_5_0_0_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" +#include "jpeg_v5_0_0.h" static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev); @@ -137,9 +138,9 @@ static int jpeg_v5_0_0_hw_init(void *handle) adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); - WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); + /* Skip ring test because pause DPG is not implemented. 
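Exercising the ring would require pausing the DPG state machine around the test, and that pause path does not exist yet, so return before amdgpu_ring_test_helper() whenever AMD_PG_SUPPORT_JPEG_DPG is set.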
*/ + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) + return 0; r = amdgpu_ring_test_helper(ring); if (r) @@ -239,7 +240,7 @@ static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data); } -static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev) +static int jpeg_v5_0_0_disable_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; @@ -252,14 +253,10 @@ static int jpeg_v5_0_0_disable_static_power_gating(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - /* keep the JPEG in static PG mode */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK); - return 0; } -static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev) +static int jpeg_v5_0_0_enable_power_gating(struct amdgpu_device *adev) { /* enable anti hang mechanism */ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), @@ -277,6 +274,121 @@ static int jpeg_v5_0_0_enable_static_power_gating(struct amdgpu_device *adev) return 0; } +static void jpeg_engine_5_0_0_dpg_clock_gating_mode(struct amdgpu_device *adev, + int inst_idx, uint8_t indirect) +{ + uint32_t data = 0; + + // JPEG disable CGC + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + + if (indirect) { + ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect); + + // Turn on All JPEG clocks + data = 0; + ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_GATE, data, indirect); + } else { + WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect); + + // Turn on All JPEG clocks + data = 0; + WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_GATE, data, indirect); + } +} + +/** + * jpeg_v5_0_0_start_dpg_mode - Jpeg start with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * @indirect: indirectly write sram + * + * Start JPEG block with dpg mode + */ +static int jpeg_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect) +{ + struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec; + uint32_t reg_data = 0; + + jpeg_v5_0_0_enable_power_gating(adev); + + // enable dynamic power gating mode + reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS); + reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK; + WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data); + + if (indirect) + adev->jpeg.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr; + + jpeg_engine_5_0_0_dpg_clock_gating_mode(adev, inst_idx, indirect); + + /* MJPEG global tiling registers */ + if (indirect) + ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, indirect); + else + WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 1); + + /* enable System Interrupt for JRBC */ + if (indirect) + ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_SYS_INT_EN, + JPEG_SYS_INT_EN__DJRBC0_MASK, indirect); + else + WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_SYS_INT_EN, + JPEG_SYS_INT_EN__DJRBC0_MASK, 1); + + if (indirect) { + /* add nop to workaround PSP size check */ + 
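/* the PSP firmware applies a minimum-size check to the DPG SRAM image, so a harmless UVD_NO_OP write pads the image before amdgpu_jpeg_psp_update_sram() hands it over */ +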
ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipUVD_NO_OP, 0, indirect); + + amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0); + } + + WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); + + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v5_0_0_stop_dpg_mode - Jpeg stop with dpg mode + * + * @adev: amdgpu_device pointer + * @inst_idx: instance number index + * + * Stop JPEG block with dpg mode + */ +static void jpeg_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) +{ + uint32_t reg_data = 0; + + reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS); + reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK; + WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data); +} + /** * jpeg_v5_0_0_start - start JPEG block * @@ -292,8 +404,13 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev) if (adev->pm.dpm_enabled) amdgpu_dpm_enable_jpeg(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { + r = jpeg_v5_0_0_start_dpg_mode(adev, 0, adev->jpeg.indirect_sram); + return r; + } + /* disable power gating */ - r = jpeg_v5_0_0_disable_static_power_gating(adev); + r = jpeg_v5_0_0_disable_power_gating(adev); if (r) return r; @@ -304,7 +421,6 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev) WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - /* enable JMI channel */ WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0, ~UVD_JMI_CNTL__SOFT_RESET_MASK); @@ -314,6 +430,10 @@ static int jpeg_v5_0_0_start(struct amdgpu_device *adev) JPEG_SYS_INT_EN__DJRBC0_MASK, ~JPEG_SYS_INT_EN__DJRBC0_MASK); + WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); + WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW, @@ -340,17 +460,22 @@ static int jpeg_v5_0_0_stop(struct amdgpu_device *adev) { int r; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { + jpeg_v5_0_0_stop_dpg_mode(adev, 0); + } else { - jpeg_v5_0_0_enable_clock_gating(adev); + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); - /* enable power gating */ - r = jpeg_v5_0_0_enable_static_power_gating(adev); - if (r) - return r; + jpeg_v5_0_0_enable_clock_gating(adev); + + /* enable power gating */ + r = jpeg_v5_0_0_enable_power_gating(adev); + if (r) + return r; + } if (adev->pm.dpm_enabled) amdgpu_dpm_enable_jpeg(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h index 
bd348336b215..5abb96159814 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h @@ -24,6 +24,12 @@ #ifndef __JPEG_V5_0_0_H__ #define __JPEG_V5_0_0_H__ +#define vcnipJPEG_CGC_GATE 0x4160 +#define vcnipJPEG_CGC_CTRL 0x4161 +#define vcnipJPEG_SYS_INT_EN 0x4141 +#define vcnipUVD_NO_OP 0x0029 +#define vcnipJPEG_DEC_GFX10_ADDR_CONFIG 0x404A + extern const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block; #endif /* __JPEG_V5_0_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 7a1ff298417a..8d7267a013d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -566,9 +566,11 @@ static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS); fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - /* reset page fault status */ - WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, - regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + if (!amdgpu_sriov_vf(adev)) { + /* clear page fault status and address */ + WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + } return fed; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 6b71ee85ee65..f5411b798e11 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -93,7 +93,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev) timeout -= 5; } while (timeout > 1); - pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT); + dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT); return -ETIME; } @@ -111,7 +111,7 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) timeout -= 10; } while (timeout > 1); - pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r); return -ETIME; } @@ -132,7 +132,7 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); trn = xgpu_ai_peek_ack(adev); if (trn) { - pr_err("trn=%x ACK should not assert! wait again !\n", trn); + dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! 
wait again !\n", trn); msleep(1); } } while(trn); @@ -155,7 +155,7 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, /* start to poll ack */ r = xgpu_ai_poll_ack(adev); if (r) - pr_err("Doesn't get ack from pf, continue\n"); + dev_err(adev->dev, "Doesn't get ack from pf, continue\n"); xgpu_ai_mailbox_set_valid(adev, false); } @@ -173,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, req == IDH_REQ_GPU_RESET_ACCESS) { r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); if (r) { - pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); + dev_err(adev->dev, "Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); return r; } /* Retrieve checksum from mailbox2 */ @@ -231,7 +231,7 @@ static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - DRM_DEBUG("get ack intr and do nothing.\n"); + dev_dbg(adev->dev, "get ack intr and do nothing.\n"); return 0; } @@ -258,12 +258,15 @@ static int xgpu_ai_wait_reset(struct amdgpu_device *adev) { int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT; do { - if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) + if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) { + dev_dbg(adev->dev, "Got AI IDH_FLR_NOTIFICATION_CMPL after %d ms\n", AI_MAILBOX_POLL_FLR_TIMEDOUT - timeout); return 0; + } msleep(10); timeout -= 10; } while (timeout > 1); - dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + + dev_dbg(adev->dev, "waiting AI IDH_FLR_NOTIFICATION_CMPL timeout\n"); return -ETIME; } @@ -405,6 +408,13 @@ static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev, xgpu_ai_send_access_requests(adev, IDH_RAS_POISON); } +static bool xgpu_ai_rcvd_ras_intr(struct amdgpu_device *adev) +{ + enum idh_event msg = xgpu_ai_mailbox_peek_msg(adev); + + return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF); +} + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, @@ -414,4 +424,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .trans_msg = xgpu_ai_mailbox_trans_msg, .req_init_data = xgpu_ai_request_init_data, .ras_poison_handler = xgpu_ai_ras_poison_handler, + .rcvd_ras_intr = xgpu_ai_rcvd_ras_intr, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index c520b2fabfb9..ed57cbc150af 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -51,7 +51,9 @@ enum idh_event { IDH_FAIL, IDH_QUERY_ALIVE, IDH_REQ_GPU_INIT_DATA_READY, - + IDH_RAS_POISON_READY, + IDH_PF_SOFT_FLR_NOTIFICATION, + IDH_RAS_ERROR_DETECTED, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 22af30a15a5f..f47bd7ada4d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -91,7 +91,7 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev) timeout -= 5; } while (timeout > 1); - pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT); + dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec \n", NV_MAILBOX_POLL_ACK_TIMEDOUT); return -ETIME; } @@ -106,13 +106,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) do { r = xgpu_nv_mailbox_rcv_msg(adev, event); - if (!r) + if (!r) { + dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now); 
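+ /* the expected event arrived within the poll window */ +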
return 0; + } msleep(10); now = (uint64_t)ktime_to_ms(ktime_get()); } while (timeout > now); + dev_dbg(adev->dev, "nv_poll_msg timed out\n"); return -ETIME; } @@ -133,11 +136,12 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_nv_mailbox_set_valid(adev, false); trn = xgpu_nv_peek_ack(adev); if (trn) { - pr_err("trn=%x ACK should not assert! wait again !\n", trn); + dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again !\n", trn); msleep(1); } } while (trn); + dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1); WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req); WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1); WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2); @@ -147,7 +151,7 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, /* start to poll ack */ r = xgpu_nv_poll_ack(adev); if (r) - pr_err("Doesn't get ack from pf, continue\n"); + dev_err(adev->dev, "Doesn't get ack from pf, continue\n"); xgpu_nv_mailbox_set_valid(adev, false); } @@ -185,7 +189,7 @@ send_request: goto send_request; if (req != IDH_REQ_GPU_INIT_DATA) { - pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r); return r; } else /* host doesn't support REQ_GPU_INIT_DATA handshake */ adev->virt.req_init_data_ver = 0; @@ -261,7 +265,7 @@ static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - DRM_DEBUG("get ack intr and do nothing.\n"); + dev_dbg(adev->dev, "get ack intr and do nothing.\n"); return 0; } @@ -291,12 +295,15 @@ static int xgpu_nv_wait_reset(struct amdgpu_device *adev) { int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; do { - if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) + if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) { + dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout); return 0; + } msleep(10); timeout -= 10; } while (timeout > 1); - dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + + dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n"); return -ETIME; } @@ -442,6 +449,13 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, } } +static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev) +{ + enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev); + + return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF); +} + const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .req_full_gpu = xgpu_nv_request_full_gpu_access, .rel_full_gpu = xgpu_nv_release_full_gpu_access, @@ -451,4 +465,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = { .wait_reset = xgpu_nv_wait_reset, .trans_msg = xgpu_nv_mailbox_trans_msg, .ras_poison_handler = xgpu_nv_ras_poison_handler, + .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 1e8fd90cab43..caf616a2c8a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -26,7 +26,7 @@ #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 #define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 -#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000 +#define NV_MAILBOX_POLL_FLR_TIMEDOUT 10000 #define NV_MAILBOX_POLL_MSG_REP_MAX 11 enum idh_request { @@ -52,7 +52,8 @@ enum idh_event { IDH_QUERY_ALIVE, IDH_REQ_GPU_INIT_DATA_READY, IDH_RAS_POISON_READY, - + IDH_PF_SOFT_FLR_NOTIFICATION, + IDH_RAS_ERROR_DETECTED, IDH_TEXT_MESSAGE = 255, }; diff --git 
a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c index 5a20bb229788..39919e0892c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c @@ -345,6 +345,7 @@ static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev) { #ifdef CONFIG_PCIEASPM uint32_t def, data; + u16 devctl2, ltr; def = data = RREG32_SOC15(PCIE, 0, regPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; @@ -374,12 +375,17 @@ static void nbif_v6_3_1_program_aspm(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data); - def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); - data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + pcie_capability_read_word(adev->pdev, PCI_EXP_DEVCTL2, &devctl2); + data = def = devctl2; + data &= ~PCI_EXP_DEVCTL2_LTR_EN; if (def != data) - WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + pcie_capability_set_word(adev->pdev, PCI_EXP_DEVCTL2, (u16)data); + + ltr = pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_LTR); - WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + if (ltr) { + pci_write_config_dword(adev->pdev, ltr + PCI_LTR_MAX_SNOOP_LAT, 0x10011001); + } #if 0 /* regPSWUSP0_PCIE_LC_CNTL2 should be replace by PCIE_LC_CNTL2 or someone else ? */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index a15673e2dc99..d27fb4ea6612 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -428,6 +428,7 @@ static int soc24_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | + AMD_PG_SUPPORT_JPEG_DPG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x50; break; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 6d6350f220b0..0faa21d8a7b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -557,7 +557,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err); if (ret) { if (ret == -EEXIST) - con->umc_ecc_log.de_updated = true; + con->umc_ecc_log.de_queried_count++; else dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret); @@ -566,7 +566,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev, return ret; } - con->umc_ecc_log.de_updated = true; + con->umc_ecc_log.de_queried_count++; return 0; } |
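
A note on the nbif_v6_3_1 ASPM hunk above: it drops the direct BIF_CFG_DEV0_EPF0 register writes in favor of the kernel's generic PCIe capability accessors. The sketch below shows that accessor pattern in isolation, assuming only the standard linux/pci.h API; the helper name is illustrative and the 0x10011001 latency value is copied verbatim from the hunk.

#include <linux/pci.h>

/*
 * Minimal sketch of the accessor pattern used by
 * nbif_v6_3_1_program_aspm(): disable LTR through the PCIe
 * capability helpers, then program the optional LTR extended
 * capability if the device exposes one.
 */
static void sketch_program_ltr(struct pci_dev *pdev)
{
	u16 ltr;

	/* read-modify-write helper that clears PCI_EXP_DEVCTL2_LTR_EN */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
				   PCI_EXP_DEVCTL2_LTR_EN);

	/* the LTR extended capability is optional; 0 means absent */
	ltr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
	if (ltr)
		/* one dword covers both max-snoop and max-no-snoop latency */
		pci_write_config_dword(pdev, ltr + PCI_LTR_MAX_SNOOP_LAT,
				       0x10011001);
}

Worth noting as a convention point: pcie_capability_set_word(), which the hunk uses after masking off PCI_EXP_DEVCTL2_LTR_EN, ORs its argument into the register, so the conventional helper for dropping a bit is pcie_capability_clear_word() as sketched here.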