From 8dc1db3172ae2f17ae71e33b608a33411ce8a1aa Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 14 Sep 2022 16:39:48 +0800 Subject: drm/amdkfd: Introduce kfd_node struct (v5) Introduce a new structure, kfd_node, which will now represent a compute node. kfd_node is carved out of the kfd_dev structure. The kfd_dev struct will now become the parent of kfd_node, and will store common resources such as doorbells, the GTT sub-allocator, etc. The kfd_node struct will store all resources specific to a compute node, such as the device queue manager, interrupt handling, etc. This is the first step in adding compute partition support in KFD. v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a537b9ef3e16..e554a48f3054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -207,7 +207,7 @@ struct device_queue_manager_asic_ops { struct queue *q, struct qcm_process_device *qpd); struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); }; /** @@ -228,7 +228,7 @@ struct device_queue_manager { struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX]; struct packet_manager packet_mgr; - struct kfd_dev *dev; + struct kfd_node *dev; struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */ struct list_head queues; unsigned int saved_flags; -- cgit v1.2.3-70-g09d2 From a805889a15315f7fa78c1c4bb2f1875c7c43f919 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:52:39 -0400 Subject: drm/amdkfd: Update SDMA queue management for GFX9.4.3 This patch updates SDMA queue management for multi-XCC GFX9.4.3. - Allocates/deallocates SDMA queues from the correct SDMA engines based on the partition mode. - Updates the kgd2kfd interface to fetch the correct SDMA register addresses. - Fixes the SDMA queue info dumped in debugfs.
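For illustration only (not part of the patch): a rough worked example of the per-partition engine selection these changes introduce, assuming an XGMI-capable ASIC with 8 SDMA instances split across 2 KFD nodes, and taking get_num_all_sdma_engines() to be the per-node total of PCIe-optimized plus XGMI engines:

    /* Assumed partition: adev->sdma.num_instances = 8, kfd->num_nodes = 2, XGMI supported */
    /* kfd_get_num_sdma_engines()      = min(8 / 2, 2) = 2   PCIe-optimized engines per node */
    /* kfd_get_num_xgmi_sdma_engines() = 8 / 2 - 2     = 2   XGMI engines per node           */
    /* get_num_all_sdma_engines()      = 2 + 2         = 4   all engines of one node         */

    /* A PCIe-optimized user queue with sdma_id = 3 created on node_id = 1 maps to: */
    sdma_engine_id = 1 * 4 + 3 % 2;   /* = 5, i.e. the second PCIe-optimized engine of node 1 */
    sdma_queue_id  = 3 / 2;           /* = 1 */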
v2: squash in fix "drm/amdkfd: Fix XGMI SDMA user-mode queue allocation" Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 194 ++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 59 +++---- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 + 5 files changed, 227 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 49d8087e469e..e81bdca53f42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -31,6 +31,192 @@ #include "oss/osssys_4_0_sh_mask.h" #include "v9_structs.h" #include "soc15.h" +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, engine_id, + regSDMA_RLC0_RB_CNTL) - + regSDMA_RLC0_RB_CNTL; + uint32_t retval = sdma_engine_reg_base + + queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); + return retval; +} + +int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + 
WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data); + + return 0; +} + +int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+12) +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_IB_SUB_REMAIN; + reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_MIDCMD_DATA0; + reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, + unsigned int utimeout) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) | + SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI); + + return 0; +} static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid, uint32_t inst) @@ -166,13 +352,13 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_4_3_hqd_load, .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, - .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_sdma_load = 
kgd_gfx_v9_4_3_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, - .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump, .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, - .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 37c6dc5c37bf..ec5f85ff34e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -741,6 +741,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (!node) goto node_alloc_error; + node->node_id = i; node->adev = kfd->adev; node->kfd = kfd; node->kfd2kgd = kfd->kfd2kgd; @@ -1323,15 +1324,16 @@ unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ if (!node->adev->gmc.xgmi.supported) - return node->adev->sdma.num_instances; + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; - return min(node->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); } unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - + kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f78c1e7aad57..69419a53a14e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -124,6 +124,15 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } +static void init_sdma_bitmaps(struct device_queue_manager *dqm) +{ + bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm)); + + bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); +} + void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1268,24 +1277,6 @@ static void init_interrupts(struct device_queue_manager *dqm) } } -static void init_sdma_bitmaps(struct device_queue_manager *dqm) -{ - unsigned int num_sdma_queues = - min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8, - get_num_sdma_queues(dqm)); - unsigned int num_xgmi_sdma_queues = - min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8, - get_num_xgmi_sdma_queues(dqm)); - - if (num_sdma_queues) - dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0); - if (num_xgmi_sdma_queues) - dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0); - - dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm); - pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap); -} - static int initialize_nocpsch(struct device_queue_manager *dqm) { int pipe, queue; @@ -1375,46 +1366,49 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == 
KFD_QUEUE_TYPE_SDMA) { - if (dqm->sdma_bitmap == 0) { + if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { /* Find first available sdma_id */ - bit = __ffs64(dqm->sdma_bitmap); - dqm->sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->sdma_bitmap, + get_num_sdma_queues(dqm)); + clear_bit(bit, dqm->sdma_bitmap); q->sdma_id = bit; } - q->properties.sdma_engine_id = q->sdma_id % - kfd_get_num_sdma_engines(dqm->dev); + q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (dqm->xgmi_sdma_bitmap == 0) { + if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more XGMI SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { - bit = __ffs64(dqm->xgmi_sdma_bitmap); - dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->xgmi_sdma_bitmap, + get_num_xgmi_sdma_queues(dqm)); + clear_bit(bit, dqm->xgmi_sdma_bitmap); q->sdma_id = bit; } /* sdma_engine_id is sdma id including @@ -1424,6 +1418,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -1442,11 +1437,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->sdma_id >= get_num_sdma_queues(dqm)) return; - dqm->sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->sdma_bitmap); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm)) return; - dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e554a48f3054..b11c474d4067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -239,8 +239,8 @@ struct device_queue_manager { unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; - uint64_t sdma_bitmap; - uint64_t xgmi_sdma_bitmap; + DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES); + DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); /* the pasid mapping for each kfd vmid */ uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1337fcdf8958..5cfebcc8b305 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -113,6 +113,8 @@ #define KFD_UNMAP_LATENCY_MS (4000) +#define KFD_MAX_SDMA_QUEUES 128 + /* * 512 = 0x200 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the @@ -260,6 +262,7 @@ struct kfd_vmid_info { struct kfd_dev; struct kfd_node { + unsigned int node_id; struct amdgpu_device *adev; /* Duplicated here along with keeping * a copy in kfd_dev to save a hop */ -- cgit v1.2.3-70-g09d2 From c2d43918a14f7b0f04932f5a45728e0fe8161da0 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 9 Dec 2022 09:03:01 -0500 Subject: drm/amdkfd: Setup current_logical_xcc_id in MQD Setup rolling current_logical_xcc_id in MQD for GFX9.4.3 to ensure each queue starts at a different place and prevent hotspotting issues. Also, remove updating current_logical_xcc_id during queue update. Suggested-by: Joseph Greathouse Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 13 +++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index b11c474d4067..cd4383bb207f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -256,6 +256,9 @@ struct device_queue_manager { struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; bool sched_running; + + /* used for GFX 9.4.3 only */ + uint32_t current_logical_xcc_start; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 2e2a0f8586f7..c781314b213c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -32,6 +32,7 @@ #include "gc/gc_9_0_sh_mask.h" #include "sdma0/sdma0_4_0_sh_mask.h" #include "amdgpu_amdkfd.h" +#include "kfd_device_queue_manager.h" static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, @@ -569,6 +570,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, uint64_t xcc_gart_addr = 0; uint64_t xcc_ctx_save_restore_area_address; uint64_t offset = mm->mqd_stride(mm, q); + uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { @@ -596,18 +598,17 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) { m->compute_tg_chunk_size = 1; + m->compute_current_logic_xcc_id = + (local_xcc_start + xcc) % + mm->dev->num_xcc_per_node; switch (xcc) { case 0: /* Master XCC */ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; - m->compute_current_logic_xcc_id = - mm->dev->num_xcc_per_node - 1; break; default: - m->compute_current_logic_xcc_id = - xcc - 1; break; } } else { @@ -642,12 +643,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, /* Master XCC */ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; - m->compute_current_logic_xcc_id = - mm->dev->num_xcc_per_node - 1; break; default: - m->compute_current_logic_xcc_id = - xcc - 1; 
break; } m->compute_tg_chunk_size = 1; -- cgit v1.2.3-70-g09d2 From 7cee6a6824a0429a6255abe91b5af01b9a01cd03 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 23 Mar 2023 17:17:20 -0400 Subject: drm/amdgpu: add configurable grace period for unmap queues The HWS scheduler allows a grace period for wave completion prior to preemption for better performance by avoiding CWSR on waves that can potentially complete quickly. The debugger, on the other hand, will want to inspect wave status immediately after it actively triggers preemption (a suspend function to be provided). To minimize latency between preemption and debugger wave inspection, allow immediate preemption by setting the grace period to 0. Note that setting the preemption grace period to 0 will result in an infinite grace period being set due to a CP FW bug, so set it to 1 for now. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 2 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 43 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 6 ++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 43 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8 ++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 63 +++++++++++++++------ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 + drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 32 +++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 39 +++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 65 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 ++ 14 files changed, 295 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index a6f98141c29c..b811a0985050 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -82,5 +82,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .enable_debug_trap = kgd_aldebaran_enable_debug_trap, .disable_debug_trap = kgd_aldebaran_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index d2918e5c0dea..a62bd0068515 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -410,6 +410,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_arcturus_enable_debug_trap, .disable_debug_trap = kgd_arcturus_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 240f5006e278..98006c7021dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -803,6 +803,47 @@ uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, return 0; } +/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. + */ +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times) + +{ + *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); +} + +void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data) +{ + *reg_data = wait_times; + + /* + * The CP cannont handle a 0 grace period input and will result in + * an infinite grace period being set so set to 1 to prevent this. + */ + if (grace_period == 0) + grace_period = 1; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); +} + static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) @@ -848,5 +889,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_vm_context_page_table_base = set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 251d61fbde07..1e993a213646 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -26,3 +26,9 @@ uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 8b293f3dcbd2..387bdf4823c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -672,6 +672,8 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3, .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, .program_trap_handler_settings = program_trap_handler_settings_v10_3, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, 
.enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 8d7d04704b00..829ee720cc44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -739,6 +739,24 @@ uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, return 0; } +/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. + */ +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times) + +{ + *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); +} + void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -926,6 +944,29 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, adev->gfx.cu_info.max_waves_per_simd; } +void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data) +{ + *reg_data = wait_times; + + /* + * The CP cannont handle a 0 grace period input and will result in + * an infinite grace period being set so set to 1 to prevent this. + */ + if (grace_period == 0) + grace_period = 1; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); +} + void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) { @@ -969,6 +1010,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v9_enable_debug_trap, .disable_debug_trap = kgd_gfx_v9_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 9588ff055393..fed5b7f18b1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ - - void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, @@ -73,3 +71,9 @@ uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2baa0781eafc..0b88a64e61fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -46,10 +46,13 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param); + uint32_t filter_param, + uint32_t grace_period); static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset); + uint32_t filter_param, + uint32_t grace_period, + bool reset); static int map_queues_cpsch(struct device_queue_manager *dqm); @@ -866,7 +869,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else if (prev_active) retval = remove_queue_mes(dqm, q, &pdd->qpd); @@ -1042,7 +1045,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, retval = execute_queues_cpsch(dqm, qpd->is_debug ? 
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); out: dqm_unlock(dqm); @@ -1182,8 +1186,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; atomic64_add(eviction_duration, &pdd->evict_duration_counter); vm_not_acquired: @@ -1525,6 +1528,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm) init_sdma_bitmaps(dqm); + if (dqm->dev->kfd2kgd->get_iq_wait_times) + dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, + &dqm->wait_times); return 0; } @@ -1563,8 +1569,9 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; + if (!dqm->dev->kfd->shared_resources.enable_mes) - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1589,7 +1596,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->is_hws_hang) { if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else remove_all_queues_mes(dqm); } @@ -1631,7 +1638,8 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, list_add(&kq->list, &qpd->priv_queue_list); increment_queue_count(dqm, qpd, kq->queue); qpd->is_debug = true; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1645,7 +1653,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, list_del(&kq->list); decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); /* * Unconditionally decrement this counter, regardless of the queue's * type. 
@@ -1722,7 +1731,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); else retval = add_queue_mes(dqm, q, qpd); if (retval) @@ -1811,7 +1820,9 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset) + uint32_t filter_param, + uint32_t grace_period, + bool reset) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -1823,6 +1834,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->active_runlist) return retval; + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + retval = pm_update_grace_period(&dqm->packet_mgr, grace_period); + if (retval) + return retval; + } + retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset); if (retval) return retval; @@ -1855,6 +1872,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, return -ETIME; } + /* We need to reset the grace period value for this device */ + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + if (pm_update_grace_period(&dqm->packet_mgr, + USE_DEFAULT_GRACE_PERIOD)) + pr_err("Failed to reset grace period\n"); + } + pm_release_ib(&dqm->packet_mgr); dqm->active_runlist = false; @@ -1870,7 +1894,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, dqm_lock(dqm); retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID, - pasid, true); + pasid, USE_DEFAULT_GRACE_PERIOD, true); dqm_unlock(dqm); return retval; @@ -1879,13 +1903,14 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, /* dqm->lock mutex has to be locked before calling this function */ static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param) + uint32_t filter_param, + uint32_t grace_period) { int retval; if (dqm->is_hws_hang) return -EIO; - retval = unmap_queues_cpsch(dqm, filter, filter_param, false); + retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false); if (retval) return retval; @@ -1943,7 +1968,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); if (retval == -ETIME) qpd->reset_wavefronts = true; } else { @@ -2228,7 +2254,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) - retval = execute_queues_cpsch(dqm, filter, 0); + retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); @@ -2589,7 +2615,8 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) return r; } dqm->active_runlist = true; - r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index cd4383bb207f..d4dd3b4acbf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -37,6 +37,7 @@ #define KFD_MES_PROCESS_QUANTUM 100000 #define KFD_MES_GANG_QUANTUM 10000 +#define USE_DEFAULT_GRACE_PERIOD 0xffffffff struct device_process_node { struct qcm_process_device *qpd; @@ -259,6 +260,8 @@ struct device_queue_manager { /* used for GFX 9.4.3 only */ uint32_t current_logical_xcc_start; + + uint32_t wait_times; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2f54172e9175..401096c103b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -370,6 +370,38 @@ out: return retval; } +int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period) +{ + int retval = 0; + uint32_t *buffer, size; + + size = pm->pmf->set_grace_period_size; + + mutex_lock(&pm->lock); + + if (size) { + kq_acquire_packet_buffer(pm->priv_queue, + size / sizeof(uint32_t), + (unsigned int **)&buffer); + + if (!buffer) { + pr_err("Failed to allocate buffer on kernel queue\n"); + retval = -ENOMEM; + goto out; + } + + retval = pm->pmf->set_grace_period(pm, buffer, grace_period); + if (!retval) + kq_submit_packet(pm->priv_queue); + else + kq_rollback_packet(pm->priv_queue); + } + +out: + mutex_unlock(&pm->lock); + return retval; +} + int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 44cf3a5f6fdb..1fda6dcf84b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -262,6 +262,41 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } +static int pm_set_grace_period_v9(struct packet_manager *pm, + uint32_t *buffer, + uint32_t grace_period) +{ + struct pm4_mec_write_data_mmio *packet; + uint32_t reg_offset = 0; + uint32_t reg_data = 0; + + pm->dqm->dev->kfd2kgd->build_grace_period_packet_info( + pm->dqm->dev->adev, + pm->dqm->wait_times, + grace_period, + ®_offset, + ®_data); + + if (grace_period == USE_DEFAULT_GRACE_PERIOD) + reg_data = pm->dqm->wait_times; + + packet = (struct pm4_mec_write_data_mmio *)buffer; + memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio)); + + packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA, + sizeof(struct pm4_mec_write_data_mmio)); + + packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register; + packet->bitfields2.addr_incr = + addr_incr___write_data__do_not_increment_address; + + packet->bitfields3.dst_mmreg_addr = reg_offset; + + packet->data = reg_data; + + return 0; +} + static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) @@ -345,6 +380,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -352,6 +388,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources_size = sizeof(struct 
pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; @@ -362,6 +399,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process_aldebaran), @@ -369,6 +407,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index faf4772ed317..c1199d06d131 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -303,6 +303,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources = pm_set_resources_vi, .map_queues = pm_map_queues_vi, .unmap_queues = pm_unmap_queues_vi, + .set_grace_period = NULL, .query_status = pm_query_status_vi, .release_mem = pm_release_mem_vi, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -310,6 +311,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = 0, .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = sizeof(struct pm4_mec_release_mem) }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 2ad708c64012..206f1960857f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -584,6 +584,71 @@ struct pm4_mec_release_mem { #endif +#ifndef PM4_MEC_WRITE_DATA_DEFINED +#define PM4_MEC_WRITE_DATA_DEFINED + +enum WRITE_DATA_dst_sel_enum { + dst_sel___write_data__mem_mapped_register = 0, + dst_sel___write_data__tc_l2 = 2, + dst_sel___write_data__gds = 3, + dst_sel___write_data__memory = 5, + dst_sel___write_data__memory_mapped_adc_persistent_state = 6, +}; + +enum WRITE_DATA_addr_incr_enum { + addr_incr___write_data__increment_address = 0, + addr_incr___write_data__do_not_increment_address = 1 +}; + +enum WRITE_DATA_wr_confirm_enum { + wr_confirm___write_data__do_not_wait_for_write_confirmation = 0, + wr_confirm___write_data__wait_for_write_confirmation = 1 +}; + +enum WRITE_DATA_cache_policy_enum { + cache_policy___write_data__lru = 0, + cache_policy___write_data__stream = 1 +}; + + +struct pm4_mec_write_data_mmio { + union { + union PM4_MES_TYPE_3_HEADER header; /*header */ + unsigned int ordinal1; + }; + + union { + struct { + unsigned int reserved1:8; + unsigned int dst_sel:4; + unsigned int reserved2:4; + unsigned int addr_incr:1; + unsigned int reserved3:2; + unsigned int resume_vf:1; + unsigned int wr_confirm:1; + unsigned int reserved4:4; + unsigned int cache_policy:2; + unsigned int reserved5:5; + 
} bitfields2; + unsigned int ordinal2; + }; + + union { + struct { + unsigned int dst_mmreg_addr:18; + unsigned int reserved6:14; + } bitfields3; + unsigned int ordinal3; + }; + + uint32_t reserved7; + + uint32_t data; + +}; + +#endif + enum { CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014 }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1b272f879b4c..4c912b7735b5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1350,6 +1350,8 @@ struct packet_manager_funcs { int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer, enum kfd_unmap_queues_filter mode, uint32_t filter_param, bool reset); + int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer, + uint32_t grace_period); int (*query_status)(struct packet_manager *pm, uint32_t *buffer, uint64_t fence_address, uint64_t fence_value); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); @@ -1360,6 +1362,7 @@ struct packet_manager_funcs { int set_resources_size; int map_queues_size; int unmap_queues_size; + int set_grace_period_size; int query_status_size; int release_mem_size; }; @@ -1382,6 +1385,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, void pm_release_ib(struct packet_manager *pm); +int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period); + /* Following PM funcs can be shared among VI and AI */ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); -- cgit v1.2.3-70-g09d2 From 97ae3c8cce96f3bebf883d0812cef5d3fdbe3e64 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 4 Apr 2022 12:27:43 -0400 Subject: drm/amdkfd: prepare map process for single process debug devices Older HW only supports debugging on a single process because the SPI debug mode setting registers are device global. The HWS has supplied a single pinned VMID (0xf) for MAP_PROCESS for debug purposes. To pin the VMID, the KFD will remove the VMID from the HWS dynamic VMID allocation via SET_RESOURCES so that a debugged process will never migrate away from its pinned VMID. The KFD is responsible for reserving and releasing this pinned VMID accordingly whenever the debugger attaches and detaches, respectively.
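As a condensed sketch of that reservation step (the full reserve_debug_trap_vmid() path added in the diff below also preempts the runlist first and checks for errors; that handling is omitted here):

    /* Hide the last KFD VMID from the HWS dynamic allocator and pin it for debug. */
    updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
    updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
    dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
    dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
    /* set_sched_resources() and map_queues_cpsch() then push the new mask to the HWS. */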
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 93 ++++++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 9 +++ drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 5 +- 4 files changed, 111 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0b88a64e61fe..cfe5bd59070e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1525,6 +1525,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->gws_queue_count = 0; dqm->active_runlist = false; INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); + dqm->trap_debug_vmid = 0; init_sdma_bitmaps(dqm); @@ -2501,6 +2502,98 @@ static void kfd_process_hw_exception(struct work_struct *work) amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + + if (dqm->trap_debug_vmid != 0) { + pr_err("Trap debug id already reserved\n"); + r = -EBUSY; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + +/* + * Releases vmid for the trap debugger + */ +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + uint32_t trap_debug_vmid; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + trap_debug_vmid = dqm->trap_debug_vmid; + if (dqm->trap_debug_vmid == 0) { + pr_err("Trap debug id is not reserved\n"); + r = -EINVAL; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = 0; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 
d4dd3b4acbf0..bf7aa3f84182 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -250,6 +250,7 @@ struct device_queue_manager { struct kfd_mem_obj *fence_mem; bool active_runlist; int sched_policy; + uint32_t trap_debug_vmid; /* hw exception */ bool is_hws_hang; @@ -285,6 +286,10 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); unsigned int get_num_sdma_queues(struct device_queue_manager *dqm); unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm); +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 1fda6dcf84b1..0fe73dbd28af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm, { struct pm4_mes_map_process *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_node *kfd = pm->dqm->dev; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); packet = (struct pm4_mes_map_process *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process)); @@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && + pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; + packet->bitfields2.new_debug = 1; + } + packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; if (qpd->tba_addr) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 206f1960857f..8b6b2bd5c148 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -146,7 +146,10 @@ struct pm4_mes_map_process { union { struct { uint32_t pasid:16; - uint32_t reserved1:8; + uint32_t reserved1:2; + uint32_t debug_vmid:4; + uint32_t new_debug:1; + uint32_t reserved2:1; uint32_t diq_enable:1; uint32_t process_quantum:7; } bitfields2; -- cgit v1.2.3-70-g09d2 From 0de4ec9a03537bd2b189b5afbf83acd6b72b0258 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 4 Apr 2022 13:38:11 -0400 Subject: drm/amdgpu: prepare map process for multi-process debug devices Unlike single process debug devices, multi-process debug devices allow debug mode setting per-VMID (non-device-global). Because the HWS manages PASID-VMID mapping, the new MAP_PROCESS API allows the KFD to forward the required SPI debug register write requests. To request a new debug mode setting change, the KFD must be able to preempt all queues then remap all queues with these new setting requests for MAP_PROCESS to take effect. Note that by default, trap enablement in non-debug mode must be disabled for performance reasons for multi-process debug devices due to setup overhead in FW. 
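A condensed view of the preempt-then-remap sequence this enables (the helpers are the ones added to kfd_device_queue_manager.c in the diff below; locking and error handling trimmed):

    /* Runlist refresh used when per-VMID debug settings change: */
    debug_lock_and_unmap(dqm);    /* take the dqm lock and unmap all queues              */
    /* ... per-process debug state that MAP_PROCESS carries is updated in between ...    */
    debug_map_and_unlock(dqm);    /* re-map queues, re-emitting MAP_PROCESS, then unlock */
    /* debug_refresh_runlist() below simply runs the two steps back to back.             */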
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 5 +++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 51 ++++++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 14 ++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 +++ 6 files changed, 87 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index a8abfe2a0a14..db6d72e7930f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -29,4 +29,9 @@ int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_info_size); +static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) +{ + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); +} + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index cfe5bd59070e..495c9238254e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -36,6 +36,7 @@ #include "kfd_kernel_queue.h" #include "amdgpu_amdkfd.h" #include "mes_api_def.h" +#include "kfd_debug.h" /* Size of the per-pipe EOP queue */ #define CIK_HPD_EOP_BYTES_LOG2 11 @@ -2594,6 +2595,56 @@ out_unlock: return r; } +int debug_lock_and_unmap(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + dqm_lock(dqm); + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false); + if (r) + dqm_unlock(dqm); + + return r; +} + +int debug_map_and_unlock(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + r = map_queues_cpsch(dqm); + + dqm_unlock(dqm); + + return r; +} + +int debug_refresh_runlist(struct device_queue_manager *dqm) +{ + int r = debug_lock_and_unmap(dqm); + + if (r) + return r; + + return debug_map_and_unlock(dqm); +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bf7aa3f84182..bb75d93712eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -290,6 +290,9 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int debug_lock_and_unmap(struct device_queue_manager *dqm); +int debug_map_and_unlock(struct device_queue_manager *dqm); +int debug_refresh_runlist(struct device_queue_manager *dqm); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 0fe73dbd28af..29a2d0499b67 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -88,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, { struct pm4_mes_map_process_aldebaran *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_dev *kfd = pm->dqm->dev->kfd; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); + int i; packet = (struct pm4_mes_map_process_aldebaran *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran)); @@ -102,6 +106,16 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, packet->bitfields14.num_oac = qpd->num_oac; packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override | + pdd->spi_dbg_launch_mode; + + if (pdd->process->debug_trap_enabled) { + for (i = 0; i < kfd->device_info.num_of_watch_points; i++) + packet->tcp_watch_cntl[i] = pdd->watch_points[i]; + + packet->bitfields2.single_memops = + !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP); + } packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4c912b7735b5..8fca7175daab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -816,6 +816,12 @@ struct kfd_process_device { uint64_t faults; uint64_t page_in; uint64_t page_out; + + /* Tracks debug per-vmid request settings */ + uint32_t spi_dbg_override; + uint32_t spi_dbg_launch_mode; + uint32_t watch_points[4]; + /* * If this process has been checkpointed before, then the user * application will use the original gpu_id on the @@ -952,6 +958,9 @@ struct kfd_process { bool xnack_enabled; + /* Tracks debug per-vmid request for debug flags */ + bool dbg_flags; + atomic_t poison; /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */ bool queues_paused; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d75dac92775c..725d936b2cc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1612,6 +1612,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, } p->pdds[p->n_pdds++] = pdd; + if (kfd_dbg_is_per_vmid_supported(pdd->dev)) + pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( + pdd->dev->adev, + false, + 0); /* Init idr used for memory handle translation */ idr_init(&pdd->alloc_idr); -- cgit v1.2.3-70-g09d2 From a70a93fa568b4f05aba548dadb673703eccf5480 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 5 May 2022 16:15:37 -0400 Subject: drm/amdkfd: add debug suspend and resume process queues operation In order to inspect waves from the saved context at any point during a debug session, the debugger must be able to preempt queues to trigger context save by suspending them. On queue suspend, the KFD will copy the context save header information so that the debugger can correctly crawl the appropriate size of the saved context. The debugger must then also be allowed to resume suspended queues. A queue that is newly created cannot be suspended because queue ids are recycled after destruction so the debugger needs to know that this has occurred. Query functions will be later added that will clear a given queue of its new queue status. 
A queue cannot be destroyed while it is suspended to preserve its saved context during debugger inspection. Have queue destruction block while a queue is suspended and unblock when it is resumed. Likewise, if a queue is about to be destroyed, it cannot be suspended. Return the number of queues successfully suspended or resumed along with a per-queue status array where the upper bits of each status entry show that the request was invalid (new/destroyed queue suspend request, missing queue) or an error occurred (HWS in a fatal state so it can't suspend or resume queues). Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 7 + .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 447 ++++++++++++++++++++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 15 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 1 + 11 files changed, 512 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 98cd52bb005f..b4fcad0e62f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -772,6 +772,11 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) return adev->have_atomics_support; } +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) +{ + amdgpu_device_flush_hdp(adev, NULL); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) { amdgpu_umc_poison_handler(adev, reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index dd740e64e6e1..2d0406bff84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -322,6 +322,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t *mmap_offset); int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, struct dma_buf **dmabuf); +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a6570b124b2b..1fae97df7a1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -410,6 +410,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pr_debug("Write ptr address == 0x%016llX\n", args->write_pointer_address); + kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0); return 0; err_create_queue: @@ -2996,7 +2997,17 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->launch_mode.launch_mode); break; case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + r = suspend_queues(target, + args->suspend_queues.num_queues, + args->suspend_queues.grace_period, + args->suspend_queues.exception_mask, + (uint32_t *)args->suspend_queues.queue_array_ptr); + + break;
case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + r = resume_queues(target, args->resume_queues.num_queues, + (uint32_t *)args->resume_queues.queue_array_ptr); + break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_SET_FLAGS: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 53c3418562d4..f4d3dfb35cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -339,6 +339,13 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind } kfd_dbg_set_workaround(target, false); + + if (!unwind) { + int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); + } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 44d87943e40a..bc9e81293165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -952,6 +952,92 @@ out_unlock: return retval; } +/* suspend_single_queue does not lock the dqm like the + * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. + */ +static int suspend_single_queue(struct device_queue_manager *dqm, + struct kfd_process_device *pdd, + struct queue *q) +{ + bool is_new; + + if (q->properties.is_suspended) + return 0; + + pr_debug("Suspending PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW); + + if (is_new || q->properties.is_being_destroyed) { + pr_debug("Suspend: skip %s queue id %i\n", + is_new ? "new" : "destroyed", + q->properties.queue_id); + return -EBUSY; + } + + q->properties.is_suspended = true; + if (q->properties.is_active) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = remove_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + decrement_queue_count(dqm, &pdd->qpd, q); + q->properties.is_active = false; + } + + return 0; +} + +/* resume_single_queue does not lock the dqm like the functions + * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. 
+ */ +static int resume_single_queue(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + struct kfd_process_device *pdd; + + if (!q->properties.is_suspended) + return 0; + + pdd = qpd_to_pdd(qpd); + + pr_debug("Restoring from suspend PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + q->properties.is_suspended = false; + + if (QUEUE_IS_ACTIVE(q->properties)) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = add_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + q->properties.is_active = true; + increment_queue_count(dqm, qpd, q); + } + + return 0; +} + static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1926,6 +2012,31 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, return map_queues_cpsch(dqm); } +static int wait_on_destroy_queue(struct device_queue_manager *dqm, + struct queue *q) +{ + struct kfd_process_device *pdd = kfd_get_process_device_data(q->device, + q->process); + int ret = 0; + + if (pdd->qpd.is_debug) + return ret; + + q->properties.is_being_destroyed = true; + + if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { + dqm_unlock(dqm); + mutex_unlock(&q->process->mutex); + ret = wait_event_interruptible(dqm->destroy_wait, + !q->properties.is_suspended); + + mutex_lock(&q->process->mutex); + dqm_lock(dqm); + } + + return ret; +} + static int destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -1945,11 +2056,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, q->properties.queue_id); } - retval = 0; - /* remove queue from list to prevent rescheduling after preemption */ dqm_lock(dqm); + retval = wait_on_destroy_queue(dqm, q); + + if (retval) { + dqm_unlock(dqm); + return retval; + } + if (qpd->is_debug) { /* * error, currently we do not allow to destroy a queue @@ -1996,7 +2112,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm_unlock(dqm); - /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */ + /* + * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid + * circular locking + */ + kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), + qpd->pqm->process, q->device, + -1, false, NULL, 0); + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); return retval; @@ -2461,8 +2584,10 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) goto out_free; } - if (!dqm->ops.initialize(dqm)) + if (!dqm->ops.initialize(dqm)) { + init_waitqueue_head(&dqm->destroy_wait); return dqm; + } out_free: kfree(dqm); @@ -2602,6 +2727,320 @@ out_unlock: return r; } +#define QUEUE_NOT_FOUND -1 +/* invalidate queue operation in array */ +static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; +} + +/* find queue index in array */ +static int q_array_get_index(unsigned int queue_id, + uint32_t num_queues, + uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) + return i; + + return QUEUE_NOT_FOUND; +} + +struct copy_context_work_handler_workarea { + struct work_struct copy_context_work; + struct kfd_process *p; +}; + +static void copy_context_work_handler (struct work_struct *work) +{ + struct copy_context_work_handler_workarea *workarea; + struct mqd_manager *mqd_mgr; + struct queue *q; 
+ struct mm_struct *mm; + struct kfd_process *p; + uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; + int i; + + workarea = container_of(work, + struct copy_context_work_handler_workarea, + copy_context_work); + + p = workarea->p; + mm = get_task_mm(p->lead_thread); + + if (!mm) + return; + + kthread_use_mm(mm); + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + + list_for_each_entry(q, &qpd->queues_list, list) { + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; + + /* We ignore the return value from get_wave_state + * because + * i) right now, it always returns 0, and + * ii) if we hit an error, we would continue to the + * next queue anyway. + */ + mqd_mgr->get_wave_state(mqd_mgr, + q->mqd, + &q->properties, + (void __user *) q->properties.ctx_save_restore_area_address, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + } + } + kthread_unuse_mm(mm); + mmput(mm); +} + +static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) +{ + size_t array_size = num_queues * sizeof(uint32_t); + uint32_t *queue_ids = NULL; + + if (!usr_queue_id_array) + return NULL; + + queue_ids = kzalloc(array_size, GFP_KERNEL); + if (!queue_ids) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(queue_ids, usr_queue_id_array, array_size)) + return ERR_PTR(-EFAULT); + + return queue_ids; +} + +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = NULL; + int total_resumed = 0; + int i; + + if (usr_queue_id_array) { + queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask per successful request */ + q_array_invalidate(num_queues, queue_ids); + } + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_resumed = 0; + + dqm_lock(dqm); + + /* unmask queues that resume or already resumed as valid */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = QUEUE_NOT_FOUND; + + if (queue_ids) + q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { + int err = resume_single_queue(dqm, &pdd->qpd, q); + + if (queue_ids) { + if (!err) { + queue_ids[q_idx] &= + ~KFD_DBG_QUEUE_INVALID_MASK; + } else { + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + + if (dqm->dev->kfd->shared_resources.enable_mes) { + wake_up_all(&dqm->destroy_wait); + if (!err) + total_resumed++; + } else { + per_device_resumed++; + } + } + } + + if (!per_device_resumed) { + dqm_unlock(dqm); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, + 0, + USE_DEFAULT_GRACE_PERIOD); + if (r) { + pr_err("Failed to resume process queues\n"); + if (queue_ids) { + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + /* mask queue as error on resume fail */ + if (q_idx != QUEUE_NOT_FOUND) + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + } + } + } else { + wake_up_all(&dqm->destroy_wait); + total_resumed += per_device_resumed; + } + + dqm_unlock(dqm); + } + + if (queue_ids) { + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue resume\n"); + + kfree(queue_ids); + } + + return total_resumed; +} + +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + int total_suspended = 0; + int i; + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
umask on successful request */ + q_array_invalidate(num_queues, queue_ids); + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_suspended = 0; + + mutex_lock(&p->event_mutex); + dqm_lock(dqm); + + /* unmask queues that suspend or already suspended */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, + queue_ids); + + if (q_idx != QUEUE_NOT_FOUND) { + int err = suspend_single_queue(dqm, pdd, q); + bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; + + if (!err) { + queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; + if (exception_clear_mask && is_mes) + q->properties.exception_status &= + ~exception_clear_mask; + + if (is_mes) + total_suspended++; + else + per_device_suspended++; + } else if (err != -EBUSY) { + r = err; + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + } + + if (!per_device_suspended) { + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + if (total_suspended) + amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + grace_period); + + if (r) + pr_err("Failed to suspend process queues.\n"); + else + total_suspended += per_device_suspended; + + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, queue_ids); + + if (q_idx == QUEUE_NOT_FOUND) + continue; + + /* mask queue as error on suspend fail */ + if (r) + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + else if (exception_clear_mask) + q->properties.exception_status &= + ~exception_clear_mask; + } + + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + amdgpu_device_flush_hdp(dqm->dev->adev, NULL); + } + + if (total_suspended) { + struct copy_context_work_handler_workarea copy_context_worker; + + INIT_WORK_ONSTACK( + ©_context_worker.copy_context_work, + copy_context_work_handler); + + copy_context_worker.p = p; + + schedule_work(©_context_worker.copy_context_work); + + + flush_work(©_context_worker.copy_context_work); + destroy_work_on_stack(©_context_worker.copy_context_work); + } + + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue suspend\n"); + + kfree(queue_ids); + + return total_suspended; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bb75d93712eb..d4e6dbffe8c2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -263,6 +263,8 @@ struct device_queue_manager { uint32_t current_logical_xcc_start; uint32_t wait_times; + + wait_queue_head_t destroy_wait; }; void device_queue_manager_init_cik( @@ -290,6 +292,14 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array); +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array); int debug_lock_and_unmap(struct 
device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index a0ac4f2fe6b5..94c0fc2e57b7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -237,6 +237,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v10_compute_mqd *m; + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -255,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * accessible to user mode */ + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 9a9b4e853516..31fec5e70d13 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -291,7 +291,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v11_compute_mqd *m; - /*struct mqd_user_context_save_area_header header;*/ + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -309,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * it's part of the context save area that is already * accessible to user mode */ -/* - header.control_stack_size = *ctl_stack_used_size; - header.wave_state_size = *save_area_used_size; + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; - header.wave_state_offset = m->cp_hqd_wg_state_offset; - header.control_stack_offset = m->cp_hqd_cntl_stack_offset; + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; - if (copy_to_user(ctl_stack, &header, sizeof(header))) + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) return -EFAULT; -*/ + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 5b87c244e909..601bb9f68048 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -311,6 +311,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v9_mqd *m; + struct kfd_context_save_area_header header; /* Control stack is located one page after MQD. 
*/ void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); @@ -322,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, *save_area_used_size = m->cp_hqd_wg_state_offset - m->cp_hqd_cntl_stack_size; - if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + + if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset, + mqd_ctl_stack + m->cp_hqd_cntl_stack_offset, + *ctl_stack_used_size)) return -EFAULT; return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index cd2d56e5cdf0..05da43bf233a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -510,6 +510,8 @@ struct queue_properties { uint32_t doorbell_off; bool is_interop; bool is_evicted; + bool is_suspended; + bool is_being_destroyed; bool is_active; bool is_gws; uint32_t pm4_target_xcc; @@ -535,7 +537,8 @@ struct queue_properties { #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ (q).queue_address != 0 && \ (q).queue_percent > 0 && \ - !(q).is_evicted) + !(q).is_evicted && \ + !(q).is_suspended) enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 70852a200d8f..01ccab607a69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -187,6 +187,7 @@ static int init_user_queue(struct process_queue_manager *pqm, /* Doorbell initialized in user space*/ q_properties->doorbell_ptr = NULL; + q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW); /* let DQM handle it*/ q_properties->vmid = 0; -- cgit v1.2.3-70-g09d2 From b17bd5dbf64677682a3bca249c64521d5eabcb38 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 11:15:29 -0400 Subject: drm/amdkfd: add debug queue snapshot operation Allow the debugger to get a snapshot of a specified number of queues containing various queue property information that is copied to the debugger. Since the debugger doesn't know how many queues exist at any given time, allow the debugger to pass the requested number of snapshots as 0 to get the actual number of potential snapshots to use for a subsequent snapshot request for actual information. To prevent future ABI breakage, pass in the requested entry_size. The KFD will return its own entry_size in case the debugger still wants to log the information in a core dump on sizing failure. Also allow the debugger to clear exceptions when doing a snapshot.
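To illustrate the two-pass pattern described above, here is a hedged debugger-side sketch. The ioctl and structure names (AMDKFD_IOC_DBG_TRAP, struct kfd_ioctl_dbg_trap_args, struct kfd_queue_snapshot_entry) are assumed from this series' uAPI, query_queue_snapshots() is hypothetical, and error handling is minimal.

/*
 * Hedged sketch (not part of the patch): two-pass queue snapshot query
 * from the debugger side.  uAPI names, including the pid/op fields of
 * kfd_ioctl_dbg_trap_args, are assumptions based on this series.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int query_queue_snapshots(int kfd_fd, uint64_t target_pid)
{
	struct kfd_ioctl_dbg_trap_args args = {0};
	struct kfd_queue_snapshot_entry *entries;
	uint32_t n;

	args.pid = target_pid;
	args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
	args.queue_snapshot.entry_size = sizeof(*entries);

	/* Pass 1: num_queues == 0 only asks how many entries exist. */
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
		return -errno;
	n = args.queue_snapshot.num_queues;

	/* Pass 2: provide a buffer sized for the reported count.  The KFD
	 * writes back its own entry_size, so a sizing mismatch can still
	 * be logged (e.g. in a core dump) instead of being mis-parsed.
	 */
	entries = calloc(n, sizeof(*entries));
	if (!entries)
		return -ENOMEM;
	args.queue_snapshot.num_queues = n;
	args.queue_snapshot.entry_size = sizeof(*entries);
	args.queue_snapshot.snapshot_buf_ptr = (uintptr_t)entries;
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args)) {
		free(entries);
		return -errno;
	}

	/* entries[0..n-1] now hold ring base/size, read/write pointers,
	 * context save area, queue/gpu id, type and exception status.
	 */
	free(entries);
	return 0;
}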
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 ++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 36 +++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 40 ++++++++++++++++++++++ 5 files changed, 90 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index b7ee79b5220a..24066756e478 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3053,6 +3053,12 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->query_exception_info.info_size); break; case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: + r = pqm_get_queue_snapshot(&target->pqm, + args->queue_snapshot.exception_mask, + (void __user *)args->queue_snapshot.snapshot_buf_ptr, + &args->queue_snapshot.num_queues, + &args->queue_snapshot.entry_size); + break; case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: pr_warn("Debug op %i not supported yet\n", args->op); r = -EACCES; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index bc9e81293165..0c1be91a87c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -3041,6 +3041,42 @@ int suspend_queues(struct kfd_process *p, return total_suspended; } +static uint32_t set_queue_type_for_user(struct queue_properties *q_props) +{ + switch (q_props->type) { + case KFD_QUEUE_TYPE_COMPUTE: + return q_props->format == KFD_QUEUE_FORMAT_PM4 + ? 
KFD_IOC_QUEUE_TYPE_COMPUTE + : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; + case KFD_QUEUE_TYPE_SDMA: + return KFD_IOC_QUEUE_TYPE_SDMA; + case KFD_QUEUE_TYPE_SDMA_XGMI: + return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; + default: + WARN_ONCE(true, "queue type not recognized!"); + return 0xffffffff; + }; +} + +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry) +{ + qss_entry->ring_base_address = q->properties.queue_address; + qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; + qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; + qss_entry->ctx_save_restore_address = + q->properties.ctx_save_restore_area_address; + qss_entry->ctx_save_restore_area_size = + q->properties.ctx_save_restore_area_size; + qss_entry->exception_status = q->properties.exception_status; + qss_entry->queue_id = q->properties.queue_id; + qss_entry->gpu_id = q->device->id; + qss_entry->ring_size = (uint32_t)q->properties.queue_size; + qss_entry->queue_type = set_queue_type_for_user(&q->properties); + q->properties.exception_status &= ~exception_clear_mask; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d4e6dbffe8c2..7dd4b177219d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -300,6 +300,9 @@ int suspend_queues(struct kfd_process *p, int resume_queues(struct kfd_process *p, uint32_t num_queues, uint32_t *usr_queue_id_array); +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry); int debug_lock_and_unmap(struct device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8ec87bc8ba82..023b17e0116b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1355,6 +1355,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size); int amdkfd_fence_wait_timeout(uint64_t *fence_addr, uint64_t fence_value, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01ccab607a69..9ad1a2186a24 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -585,6 +585,46 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, save_area_used_size); } +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size) +{ + struct process_queue_node *pqn; + struct kfd_queue_snapshot_entry src; + uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries; + int r = 0; + + *num_qss_entries = 0; + if (!(*entry_size)) + return -EINVAL; + + *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry)); + mutex_lock(&pqm->process->event_mutex); + + memset(&src, 0, sizeof(src)); + + list_for_each_entry(pqn, 
&pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + if (*num_qss_entries < tmp_qss_entries) { + set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src); + + if (copy_to_user(buf, &src, *entry_size)) { + r = -EFAULT; + break; + } + buf += tmp_entry_size; + } + *num_qss_entries += 1; + } + + mutex_unlock(&pqm->process->event_mutex); + return r; +} + static int get_queue_data_sizes(struct kfd_process_device *pdd, struct queue *q, uint32_t *mqd_size, -- cgit v1.2.3-70-g09d2