Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1791
1 file changed, 1243 insertions(+), 548 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 97105a5bb246..7f944bb11298 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -38,7 +38,6 @@
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
-#include "hdp/hdp_4_0_offset.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
@@ -48,18 +47,19 @@
#include "amdgpu_ras.h"
+#include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
+#include "gfx_v9_4_2.h"
+
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+#include "asic_reg/gc/gc_9_0_default.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
@@ -108,16 +108,25 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
-MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
-MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -131,18 +140,6 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
-struct ras_gfx_subblock_reg {
- const char *name;
- uint32_t hwip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t sec_count_mask;
- uint32_t sec_count_shift;
- uint32_t ded_count_mask;
- uint32_t ded_count_shift;
-};
-
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -521,8 +518,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -705,6 +702,12 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
+};
+
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -731,6 +734,75 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
+static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+ }
+
+}
+
+static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
+ u32 v, u32 acc_flags, u32 hwip)
+{
+ if ((acc_flags & AMDGPU_REGS_RLC) &&
+ amdgpu_sriov_fullaccess(adev)) {
+ gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
+
+ return;
+ }
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, v);
+ else
+ WREG32(offset, v);
+}
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -741,20 +813,148 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info);
+ struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
+
+static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0* queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_0_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_0_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_0_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 2,
+};
+
+static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -762,7 +962,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_2_1,
ARRAY_SIZE(golden_settings_gc_9_2_1));
@@ -770,7 +970,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -778,15 +978,16 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_1_arct,
ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv2,
ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -795,16 +996,21 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common goldensetting */
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_golden_registers(adev,
+ adev->smuio.funcs->get_die_id(adev));
+ break;
default:
break;
}
- if (adev->asic_type != CHIP_ARCTURUS)
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -905,7 +1111,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -987,15 +1194,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
+ ((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
- (adev->gfx.pfp_feature_version < 46))
- DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
- GRBM requires 1-cycle delay in cp firmware\n");
+ (adev->gfx.pfp_feature_version < 46)))
+ DRM_WARN_ONCE("CP firmware version too old, please update!");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1006,7 +1213,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 42))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1017,7 +1224,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1028,7 +1235,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1040,22 +1248,83 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = true;
break;
default:
+ adev->gfx.me_fw_write_wait = true;
+ adev->gfx.mec_fw_write_wait = true;
break;
}
}
+struct amdgpu_gfxoff_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+ { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+ /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
+{
+ const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
+static bool is_raven_kicker(struct amdgpu_device *adev)
+{
+ if (adev->pm.fw_version >= 0x41e2b)
+ return true;
+ else
+ return false;
+}
+
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&
+ (adev->gfx.me_fw_version >= 0x000000a5) &&
+ (adev->gfx.me_feature_version >= 52))
+ return true;
+ else
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
break;
- case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 ||
- adev->pdev->device == 0x15d8) &&
- (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
- !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
+ ((!is_raven_kicker(adev) &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -1063,7 +1332,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
@@ -1285,6 +1554,16 @@ out:
return err;
}
+static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
+{
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))
+ return false;
+
+ return true;
+}
+
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
@@ -1306,21 +1585,26 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (!err) {
+ err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ } else {
+ err = 0;
+ adev->gfx.mec2_fw = NULL;
+ }
} else {
- err = 0;
- adev->gfx.mec2_fw = NULL;
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -1349,8 +1633,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_RENOIR) {
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1383,36 +1666,43 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
chip_name = "vega10";
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
chip_name = "vega12";
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
chip_name = "vega20";
break;
- case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
- chip_name = "renoir";
+ case IP_VERSION(9, 3, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
+ break;
+ case IP_VERSION(9, 4, 2):
+ chip_name = "aldebaran";
break;
default:
BUG();
}
/* No CPG in Arcturus */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
if (r)
return r;
@@ -1508,7 +1798,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
always_on_cu_num = 4;
- else if (adev->asic_type == CHIP_VEGA12)
+ else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))
always_on_cu_num = 8;
else
always_on_cu_num = 12;
@@ -1647,7 +1937,10 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- return 5;
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev))
+ return 5;
+ else
+ return 4;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
@@ -1666,7 +1959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
+ if (adev->flags & AMD_IS_APU) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
r = amdgpu_gfx_rlc_init_cpt(adev);
@@ -1674,17 +1967,22 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
gfx_v9_0_init_lbpw(adev);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
gfx_v9_4_init_lbpw(adev);
break;
default:
break;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1710,29 +2008,30 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v9_0_mec_fini(adev);
+ return r;
+ }
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
-
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
@@ -1755,7 +2054,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1767,7 +2066,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1796,6 +2095,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+ dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
@@ -1824,14 +2124,20 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+};
+
+static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1841,8 +2147,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1850,7 +2156,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1859,7 +2165,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
+ adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1873,18 +2180,20 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
if (err)
return err;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
+ adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1894,7 +2203,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1904,6 +2213,21 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
+ case IP_VERSION(9, 4, 2):
+ adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
+ /* check vbios table if gpu info is not available */
+ err = amdgpu_atomfirmware_get_gfx_info(adev);
+ if (err)
+ return err;
+ break;
default:
BUG();
break;
@@ -1952,9 +2276,9 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
- int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1973,15 +2297,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
- if (r)
- return r;
-
-
- return 0;
+ return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
}
static int gfx_v9_0_sw_init(void *handle)
@@ -1991,13 +2311,15 @@ static int gfx_v9_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -2069,8 +2391,9 @@ static int gfx_v9_0_sw_init(void *handle)
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -2125,7 +2448,9 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_gfx_ras_fini(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_fini)
+ adev->gfx.ras_funcs->ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2138,7 +2463,7 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
+ if (adev->flags & AMD_IS_APU) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
@@ -2154,7 +2479,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
/* TODO */
}
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+ u32 instance)
{
u32 data;
@@ -2217,8 +2543,6 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
}
#define DEFAULT_SH_MEM_BASES (0x6000)
-#define FIRST_COMPUTE_VMID (8)
-#define LAST_COMPUTE_VMID (16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
@@ -2238,7 +2562,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
mutex_lock(&adev->srbm_mutex);
- for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
@@ -2249,7 +2573,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
acccess. These should be enabled by FW for target VMIDs. */
- for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
@@ -2267,7 +2591,7 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
* the driver can enable them for graphics. VMID0 should maintain
* access so that HWS firmware can save/restore entries.
*/
- for (vmid = 1; vmid < 16; vmid++) {
+ for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
@@ -2275,6 +2599,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
+ tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
+ DISABLE_BARRIER_WAITCNT, 1);
+ WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -2298,14 +2638,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
@@ -2320,6 +2660,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
gfx_v9_0_init_compute_vmid(adev);
gfx_v9_0_init_gds_vmid(adev);
+ gfx_v9_0_init_sq_config(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
@@ -2363,12 +2704,17 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
+ u32 tmp;
+
+ /* These interrupts should be enabled to drive DS clock */
+
+ tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
+ if(adev->gfx.num_gfx_rings)
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
@@ -2537,7 +2883,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
- if (enable == true) {
+ if (enable) {
/* enable GFXIP control over CGPG */
data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
if(default_data != data)
@@ -2593,8 +2939,8 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
-
- pwr_10_0_gfxip_control_over_cgpg(adev, true);
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))
+ pwr_10_0_gfxip_control_over_cgpg(adev, true);
}
}
@@ -2705,9 +3051,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- if (adev->asic_type == CHIP_VEGA12 ||
- (adev->asic_type == CHIP_RAVEN &&
- adev->rev_id >= 8))
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -2724,7 +3069,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
}
}
-void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
@@ -2819,14 +3164,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
gfx_v9_0_enable_lbpw(adev, true);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
@@ -2843,16 +3189,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -3048,15 +3389,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3119,72 +3456,17 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- uint64_t queue_mask = 0;
- int r, i;
-
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
- continue;
+ struct amdgpu_device *adev = ring->adev;
- /* This situation may be hit in the future if a new HW
- * generation exposes more than 64 queues. If so, the
- * definition of queue_mask needs updating */
- if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
- break;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
-
- queue_mask |= (1ull << i);
}
-
- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- /* set resources */
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
- amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
- PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
- amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
- amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* oac mask */
- amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
- uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
- /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
- PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
- PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
- PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
- PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
- PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
- PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
- PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
- amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
- amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
- amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
- }
-
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
-
- return r;
}
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
@@ -3323,8 +3605,15 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
- /* activate the queue */
- mqd->cp_hqd_active = 1;
+ /* set static priority for a queue/ring */
+ gfx_v9_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
+
+ /* map_queues packet doesn't need activate the queue,
+ * so only kiq need set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
return 0;
}
@@ -3405,7 +3694,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+ /* If GC has entered CGPG, ringing doorbell > first page
+ * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
+ * workaround this issue. And this change has to align with firmware
+ * update.
+ */
+ if (check_if_enlarge_doorbell_range(adev))
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+ (adev->doorbell.size - 4));
+ else
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell_index.userqueue_end * 2) << 2);
}
@@ -3478,11 +3776,18 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
+ struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ /* GPU could be in bad state during probe, driver trigger the reset
+ * after load the SMU, in this case , the mqd is not be initialized.
+ * driver need to re-init the mqd.
+ * check mqd->cp_hqd_pq_control since this value should not be 0
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+ if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){
+ /* for GPU_RESET case , reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3518,8 +3823,15 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
+ struct v9_mqd *tmp_mqd;
+
+ /* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control
+ * is not be initialized before
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3531,13 +3843,14 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3593,7 +3906,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = gfx_v9_0_kiq_kcq_enable(adev);
+ r = amdgpu_gfx_enable_kcq(adev);
done:
return r;
}
@@ -3607,7 +3920,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
/* legacy firmware loading */
r = gfx_v9_0_cp_gfx_load_microcode(adev);
if (r)
@@ -3623,7 +3936,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_cp_gfx_resume(adev);
if (r)
return r;
@@ -3633,7 +3946,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -3650,9 +3963,27 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&
+ adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
+ adev->df.hash_status.hash_64k);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
+ adev->df.hash_status.hash_2m);
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
+ adev->df.hash_status.hash_1g);
+ WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
+}
+
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
@@ -3667,6 +3998,8 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_constants_init(adev);
+ gfx_v9_0_init_tcp_config(adev);
+
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3675,35 +4008,8 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- return r;
-}
-
-static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
-{
- int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
-
- r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
- if (r)
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
-
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
- PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
- PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
- PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- amdgpu_ring_write(kiq_ring, 0);
- }
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ disable failed\n");
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
}
@@ -3719,7 +4025,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ amdgpu_gfx_disable_kcq(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3735,7 +4041,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ is hanging when binding back
*/
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
@@ -3746,8 +4052,14 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- adev->gfx.rlc.funcs->stop(adev);
+ /* Skip suspend with A+A reset */
+ if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+ dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
return 0;
}
@@ -3821,7 +4133,7 @@ static int gfx_v9_0_soft_reset(void *handle)
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3848,28 +4160,95 @@ static int gfx_v9_0_soft_reset(void *handle)
return 0;
}
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq, reg_val_offs = 0;
+ uint64_t value = 0;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 9 | /* src: register*/
+ (5 << 8) | /* dst: memory */
+ (1 << 16) | /* count sel */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for gpu reset case because this way may
+ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which cause
+ * gpu_recover() hang there.
+ *
+ * also don't wait anymore for IRQ context
+ * */
+ if (r < 1 && (amdgpu_in_reset(adev)))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ mb();
+ value = (uint64_t)adev->wb.wb[reg_val_offs] |
+ (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ pr_err("failed to read gpu clock\n");
+ return ~0;
+}
+
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
- uint32_t tmp, lsb, msb, i = 0;
- do {
- if (i != 0)
- udelay(1);
- tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
- msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- i++;
- } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
- clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
+ clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
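For reference, the SR-IOV runtime path that this hunk removes used the classic rollover guard for a split 64-bit counter: read the MSB, then the LSB, then the MSB again, and retry if the high word moved in between. A hedged sketch of that pattern; read_msb() and read_lsb() are hypothetical stand-ins for the RLC_REFCLOCK_TIMESTAMP register reads.

/* Sketch of the rollover-safe read performed by the removed path. */
static u64 read_split_counter(void)
{
	u32 tmp, lsb, msb;

	do {
		tmp = read_msb();	/* sample the high word */
		lsb = read_lsb();	/* sample the low word */
		msb = read_msb();	/* re-sample; retry if it carried */
	} while (tmp != msb);

	return ((u64)msb << 32) | lsb;
}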
@@ -3936,33 +4315,173 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+ 0xbf84fff8, 0xbf810000,
+};
+
+/* When the register arrays below are changed, please update gpr_reg_size
+ and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
+ to cover all gfx9 ASICs */
static const struct soc15_reg_entry vgpr_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
-static const struct soc15_reg_entry sgpr_init_regs[] = {
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
+static const struct soc15_reg_entry sgpr1_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
- { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};
-static const struct soc15_reg_entry sec_ded_counter_registers[] = {
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
+};
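The comment above these arrays asks that gpr_reg_size in gfx_v9_0_do_edc_gpr_workarounds() be kept in sync whenever they change; that function computes it as max_shader_engines + 6, i.e. six dispatch-setup registers plus one STATIC_THREAD_MGMT_SE entry per shader engine. One defensive check that could sit at the top of that function (hypothetical, not part of this patch):

/* Hypothetical sanity check: the init arrays carry six dispatch-setup
 * registers plus eight per-SE entries, so the computed gpr_reg_size must
 * never index past them.
 */
if (WARN_ON(gpr_reg_size > ARRAY_SIZE(sgpr1_init_regs)))
	return -EINVAL;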
+
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
@@ -4050,10 +4569,19 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- int r, i, j, k;
+ int r, i;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
+ int compute_dim_x = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se;
+ int sgpr_work_group_size = 5;
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+ int vgpr_init_shader_size;
+ const u32 *vgpr_init_shader_ptr;
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
+
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
@@ -4062,27 +4590,40 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+ } else {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+ vgpr_init_regs_ptr = vgpr_init_regs;
+ }
+
total_size =
- ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
- ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
+ total_size +=
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
- total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
}
/* load the compute shaders */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4092,11 +4633,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* VGPR */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4108,7 +4649,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
+ ib.ptr[ib.length_dw++] = 1; /* y */
+ ib.ptr[ib.length_dw++] = 1; /* z */
+ ib.ptr[ib.length_dw++] =
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
+
+ /* write CS partial flush packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
+
+ /* SGPR1 */
+ /* write the register state for the compute dispatch */
+ for (i = 0; i < gpr_reg_size; i++) {
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
+ }
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
+ - PACKET3_SET_SH_REG_START;
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
+
+ /* write dispatch packet */
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4118,13 +4687,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
- /* SGPR */
+ /* SGPR2 */
/* write the register state for the compute dispatch */
- for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
+ for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
@@ -4136,7 +4705,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = 128; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4160,19 +4729,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- /* read back registers to clear the counters */
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
- gfx_v9_0_select_se_sh(adev, j, 0x0, k);
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
- }
- }
- }
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
- mutex_unlock(&adev->grbm_idx_mutex);
-
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -4184,11 +4740,14 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
gfx_v9_0_set_gds_init(adev);
@@ -4202,19 +4761,39 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_gfx_ras_late_init(adev);
- if (r)
- return r;
-
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
+ /*
+ * Temporary workaround to fix the issue that CP firmware fails to
+ * update the read pointer when CPDMA writes the clearing operation
+ * to GDS in the suspend/resume sequence on several cards. So just
+ * limit this operation to the cold boot sequence.
+ */
+ if ((!adev->in_suspend) &&
+ (adev->gds.gds_size)) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
+ }
/* requires IBs so do in late init after IB pool is initialized */
- r = gfx_v9_0_do_edc_gpr_workarounds(adev);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
+ else
+ r = gfx_v9_0_do_edc_gpr_workarounds(adev);
+
if (r)
return r;
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_late_init) {
+ r = adev->gfx.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->enable_watchdog_timer)
+ adev->gfx.ras_funcs->enable_watchdog_timer(adev);
+
return 0;
}
@@ -4324,7 +4903,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4358,7 +4937,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
@@ -4392,13 +4971,13 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (!adev->gfx.num_gfx_rings)
return;
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -4410,8 +4989,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ else
+ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
@@ -4460,7 +5043,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
else
@@ -4515,6 +5098,54 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 reg, data;
+
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
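gfx_v9_0_update_spm_vmid() is a plain read-modify-write of the VMID field, picking the no-KIQ register accessors when a single VF owns the PP interface; it is wired into gfx_v9_0_rlc_funcs below. The manual mask/shift above is equivalent to the REG_SET_FIELD helper, shown here only for illustration:

/* Equivalent field update, for illustration only (the function above does
 * the same mask and shift by hand):
 */
data = REG_SET_FIELD(data, RLC_SPM_MC_CNTL, RLC_SPM_VMID, vmid);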
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_0_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_0,
+ ARRAY_SIZE(rlcg_access_gc_9_0));
+}
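gfx_v9_0_is_rlcg_access_range() simply walks the rlcg_access_gc_9_0 table and reports whether a register offset must be written through the RLC gate under SR-IOV; it is exported through gfx_v9_0_rlc_funcs below so the generic register-write path can pick the right mechanism. A hedged sketch of how a caller might consult it; direct_wreg() and rlcg_wreg() are hypothetical stand-ins for the real write paths.

/* Illustrative only, not part of this patch. */
static void sketch_wreg(struct amdgpu_device *adev, u32 offset, u32 value)
{
	if (amdgpu_sriov_vf(adev) &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range(adev, offset))
		rlcg_wreg(adev, offset, value);		/* go through the RLC gate */
	else
		direct_wreg(adev, offset, value);	/* plain MMIO write */
}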
+
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4526,22 +5157,25 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
- .start = gfx_v9_0_rlc_start
+ .start = gfx_v9_0_rlc_start,
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
+ .sriov_wreg = gfx_v9_0_sriov_wreg,
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_PG_STATE_GATE);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- if (!enable) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
+ if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- }
+
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -4564,13 +5198,8 @@ static int gfx_v9_0_set_powergating_state(void *handle,
if (enable)
amdgpu_gfx_off_ctrl(adev, true);
break;
- case CHIP_VEGA12:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else {
- amdgpu_gfx_off_ctrl(adev, true);
- }
+ case IP_VERSION(9, 2, 1):
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -4587,15 +5216,17 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
gfx_v9_0_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
@@ -4612,12 +5243,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -4626,18 +5257,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
@@ -4674,7 +5305,7 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
@@ -4729,7 +5360,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring);
}
@@ -4854,112 +5485,13 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
/* XXX check if swapping is necessary on BE */
if (ring->use_doorbell) {
- atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else{
BUG(); /* only DOORBELL method supported on gfx9 now */
@@ -5040,10 +5572,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
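emit_frame_cntl generalizes the old emit_tmz hook: the start/end distinction stays in FRAME_CMD(0/1), and the TMZ bit is OR'd in only when the frame is secure. The call sites live outside this hunk; a hedged sketch of the expected call pattern around a job's IBs:

/* Illustrative call pattern only; the real call sites are in the common IB
 * submission code, not in this hunk.
 */
ring->funcs->emit_frame_cntl(ring, true, secure);	/* frame start */
/* ... emit the job's IBs ... */
ring->funcs->emit_frame_cntl(ring, false, secure);	/* frame end */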
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5053,8 +5588,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
- gfx_v9_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -5105,7 +5638,8 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
@@ -5116,9 +5650,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- adev->virt.reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -5277,6 +5811,7 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ break;
default:
break;
}
@@ -5440,7 +5975,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
}
-static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
@@ -5888,7 +6423,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
int ret;
struct ta_ras_trigger_error_input block_info = { 0 };
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
@@ -6013,73 +6548,76 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_walker_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_walker_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_2m_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
}
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_4k_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
ded_count = (data & 0x00018000L) >> 0xf;
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- atc_l2_cache_4k_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, atc_l2_cache_4k_mems[i],
+ ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6092,36 +6630,39 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
- for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
- if(ras_subblock_regs[i].reg_offset != reg->reg_offset ||
- ras_subblock_regs[i].seg != reg->seg ||
- ras_subblock_regs[i].inst != reg->inst)
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
+ if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_0_ras_fields[i].seg != reg->seg ||
+ gfx_v9_0_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = (value &
- ras_subblock_regs[i].sec_count_mask) >>
- ras_subblock_regs[i].sec_count_shift;
+ gfx_v9_0_ras_fields[i].sec_count_mask) >>
+ gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
- ras_subblock_regs[i].name,
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], SEC %d\n",
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
- ras_subblock_regs[i].ded_count_mask) >>
- ras_subblock_regs[i].ded_count_shift;
+ gfx_v9_0_ras_fields[i].ded_count_mask) >>
+ gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
- ras_subblock_regs[i].name,
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], DED %d\n",
+ gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -6131,6 +6672,61 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i, j, k;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ /* read back registers to clear the counters */
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
+ }
+ }
+ }
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
@@ -6139,7 +6735,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
uint32_t i, j, k;
uint32_t reg_value;
- if (adev->asic_type != CHIP_VEGA20)
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
err_data->ue_count = 0;
@@ -6147,16 +6743,17 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
- for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
- for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
gfx_v9_0_select_se_sh(adev, j, 0, k);
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- __get_ras_error_count(&sec_ded_counter_registers[i],
- j, k, reg_value,
- &sec_count, &ded_count);
+ gfx_v9_0_ras_error_count(adev,
+ &gfx_v9_0_edc_counter_regs[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
@@ -6172,6 +6769,84 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
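The packet above is one PACKET3 header plus six payload dwords (CP_COHER_CNTL, two SIZE dwords, two BASE dwords and POLL_INTERVAL), which is where the "+ 7" added to emit_frame_size in both ring-funcs tables below comes from. A hypothetical named constant, not in this patch, just to make the accounting explicit:

/* Hypothetical: dword cost of gfx_v9_0_emit_mem_sync(),
 * header + CP_COHER_CNTL + 2x SIZE + 2x BASE + POLL_INTERVAL.
 */
#define GFX_V9_0_MEM_SYNC_DW	(1 + 1 + 2 + 2 + 1)	/* = 7 */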
+
+static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+ /* the mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
+ val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
+ break;
+ case 1:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
+ break;
+ case 2:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
+ break;
+ case 3:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+ /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
+ * limit the number of gfx waves. Setting 5 bits (0x1f) makes sure gfx
+ * only gets around 25% of the gpu resources.
+ */
+ val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
+ val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get the best QoS for high priority compute jobs.
+ *
+ * amdgpu controls only the 1st ME (CS pipes 0-3).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
+ }
+}
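Per the emit_frame_size comments below, each emit_wreg costs 5 dwords, so emit_wave_limit reserves 5 dwords for the single PERCENT_GFX write plus 15 for up to three PERCENT_CS writes on the other pipes of the first ME. A hypothetical constant for the same budget, not part of this patch:

/* Hypothetical: worst-case dword budget of gfx_v9_0_emit_wave_limit(),
 * one PERCENT_GFX write plus three PERCENT_CS writes at 5 dwords each.
 */
#define GFX_V9_0_WAVE_LIMIT_DW	(5 + 3 * 5)	/* = 20 */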
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6218,7 +6893,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6234,11 +6910,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
- .emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6258,7 +6935,10 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + /* gfx_v9_0_emit_mem_sync */
+ 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6270,10 +6950,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .emit_wave_limit = gfx_v9_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6356,13 +7037,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -6373,38 +7056,50 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_size = 0x10000;
break;
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_size = 0x1000;
break;
+ case IP_VERSION(9, 4, 2):
+ /* aldebaran removed all the GDS internal memory;
+ * only GWS opcodes are supported in the kernel,
+ * e.g. barrier and semaphore */
+ adev->gds.gds_size = 0;
+ break;
default:
adev->gds.gds_size = 0x10000;
break;
}
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_compute_max_wave_id = 0x7ff;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
- case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
+ case IP_VERSION(9, 4, 2):
+ /* deprecated for Aldebaran, no usage at all */
+ adev->gds.gds_compute_max_wave_id = 0;
+ break;
default:
/* this really depends on the chip */
adev->gds.gds_compute_max_wave_id = 0x7ff;