author | Maxime Ripard <maxime@cerno.tech> | 2021-04-26 14:03:09 +0200
committer | Maxime Ripard <maxime@cerno.tech> | 2021-04-26 14:03:09 +0200
commit | 355b60296143a090039211c5f0e1463f84aab65a (patch)
tree | b74d4ef2aea66252ea9cf77c847de6c6e72a02b7 /drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
parent | 91185d55b32e7e377f15fb46a62b216f8d3038d4 (diff)
parent | a1a1ca70deb3ec600eeabb21de7f3f48aaae5695 (diff)
Merge drm/drm-next into drm-misc-next
Christian needs some patches from drm/next
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 167
1 file changed, 160 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index def583916294..3f15bf34123a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -50,6 +50,9 @@
 #define VCN_INSTANCES_SIENNA_CICHLID		2
 #define DEC_SW_RING_ENABLED			FALSE
 
+#define RDECODE_MSG_CREATE			0x00000000
+#define RDECODE_MESSAGE_CREATE			0x00000001
+
 static int amdgpu_ih_clientid_vcns[] = {
 	SOC15_IH_CLIENTID_VCN,
 	SOC15_IH_CLIENTID_VCN1
@@ -171,6 +174,7 @@ static int vcn_v3_0_sw_init(void *handle)
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 		volatile struct amdgpu_fw_shared *fw_shared;
+
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
@@ -198,6 +202,8 @@ static int vcn_v3_0_sw_init(void *handle)
 		if (r)
 			return r;
 
+		atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
 		ring = &adev->vcn.inst[i].ring_dec;
 		ring->use_doorbell = true;
 		if (amdgpu_sriov_vf(adev)) {
@@ -205,11 +211,10 @@ static int vcn_v3_0_sw_init(void *handle)
 		} else {
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
 		}
-		if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
-			ring->no_scheduler = true;
 		sprintf(ring->name, "vcn_dec_%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-				     AMDGPU_RING_PRIO_DEFAULT);
+				     AMDGPU_RING_PRIO_DEFAULT,
+				     &adev->vcn.inst[i].sched_score);
 		if (r)
 			return r;
 
@@ -227,18 +232,18 @@ static int vcn_v3_0_sw_init(void *handle)
 			} else {
 				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
 			}
-			if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
-				ring->no_scheduler = true;
 			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-					     AMDGPU_RING_PRIO_DEFAULT);
+					     AMDGPU_RING_PRIO_DEFAULT,
+					     &adev->vcn.inst[i].sched_score);
 			if (r)
 				return r;
 		}
 
 		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
-					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
+					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
 		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
 	}
 
@@ -1074,7 +1079,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
 		lower_32_bits(ring->wptr));
 
+	/* Reset FW shared memory RBC WPTR/RPTR */
+	fw_shared->rb.rptr = 0;
+	fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+
+	/* Resetting done, fw can check RB ring */
 	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
 	/* Unstall DPG */
 	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1239,9 +1250,11 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
 		/* Initialize the ring buffer's read and write pointers */
 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
 		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
 			lower_32_bits(ring->wptr));
+		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
 		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
 		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
@@ -1662,6 +1675,10 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 				fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
+				/* Restore wptr/rptr with pointers saved in FW shared memory */
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+
 				/* Unstall DPG */
 				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1721,6 +1738,15 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	volatile struct amdgpu_fw_shared *fw_shared;
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		/* Whenever RBC_RB_WPTR is updated, save the wptr in FW shared rb.wptr and scratch2 */
+		fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+			lower_32_bits(ring->wptr));
+	}
 
 	if (ring->use_doorbell) {
 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
@@ -1822,6 +1848,132 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+{
+	struct drm_gpu_scheduler **scheds;
+
+	/* The create msg must be in the first IB submitted */
+	if (atomic_read(&p->entity->fence_seq))
+		return -EINVAL;
+
+	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+		[AMDGPU_RING_PRIO_DEFAULT].sched;
+	drm_sched_entity_modify_sched(p->entity, scheds, 1);
+	return 0;
+}
+
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+{
+	struct ttm_operation_ctx ctx = { false, false };
+	struct amdgpu_bo_va_mapping *map;
+	uint32_t *msg, num_buffers;
+	struct amdgpu_bo *bo;
+	uint64_t start, end;
+	unsigned int i;
+	void *ptr;
+	int r;
+
+	addr &= AMDGPU_GMC_HOLE_MASK;
+	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+	if (r) {
+		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+		return r;
+	}
+
+	start = map->start * AMDGPU_GPU_PAGE_SIZE;
+	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+	if (addr & 0x7) {
+		DRM_ERROR("VCN messages must be 8-byte aligned!\n");
+		return -EINVAL;
+	}
+
+	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (r) {
+		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_kmap(bo, &ptr);
+	if (r) {
+		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+		return r;
+	}
+
+	msg = ptr + addr - start;
+
+	/* Check length */
+	if (msg[1] > end - addr) {
+		r = -EINVAL;
+		goto out;
+	}
+
+	if (msg[3] != RDECODE_MSG_CREATE)
+		goto out;
+
+	num_buffers = msg[2];
+	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+		uint32_t offset, size, *create;
+
+		if (msg[0] != RDECODE_MESSAGE_CREATE)
+			continue;
+
+		offset = msg[1];
+		size = msg[2];
+
+		if (offset + size > end) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		create = ptr + addr + offset - start;
+
+		/* H.264, HEVC and VP9 can run on any instance */
+		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+			continue;
+
+		r = vcn_v3_0_limit_sched(p);
+		if (r)
+			goto out;
+	}
+
+out:
+	amdgpu_bo_kunmap(bo);
+	return r;
+}
+
+static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+					   uint32_t ib_idx)
+{
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	uint32_t msg_lo = 0, msg_hi = 0;
+	unsigned int i;
+	int r;
+
+	/* The first instance can decode anything */
+	if (!ring->me)
+		return 0;
+
+	for (i = 0; i < ib->length_dw; i += 2) {
+		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+		uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+
+		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+			msg_lo = val;
+		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+			msg_hi = val;
+		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+			   val == 0) {
+			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+			if (r)
+				return r;
+		}
+	}
+	return 0;
+}
+
 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
@@ -1829,6 +1981,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
+	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
 	.emit_frame_size =
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
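
The magic indices in vcn_v3_0_dec_msg() above (msg[1], msg[2], msg[3], the 4-dword records starting at msg[6]) imply a simple dword layout for the decoder create message. Purely as a reading aid, a hypothetical C sketch of that layout, reconstructed from the parsing code; only the offsets come from the code above, the struct and field names are illustrative assumptions, not taken from AMD's firmware headers:

#include <stdint.h>

/*
 * Hypothetical layout of the RDECODE create message, inferred from the
 * index arithmetic in vcn_v3_0_dec_msg(). All names are assumptions.
 */
struct rdecode_msg {
	uint32_t header;	/* msg[0]: not inspected by the parser */
	uint32_t total_size;	/* msg[1]: bounds-checked against the end of the BO mapping */
	uint32_t num_buffers;	/* msg[2]: number of 4-dword records that follow */
	uint32_t msg_type;	/* msg[3]: only RDECODE_MSG_CREATE (0x0) is parsed further */
	uint32_t reserved[2];	/* msg[4..5]: skipped by the parser */
	struct {
		uint32_t cmd;	/* RDECODE_MESSAGE_CREATE (0x1) marks a create record */
		uint32_t offset;	/* byte offset of the create parameters within the BO */
		uint32_t size;	/* size of the create parameters, bounds-checked too */
		uint32_t reserved;
	} buffers[];		/* msg[6] onwards, num_buffers entries */
};

The first dword of each referenced create buffer (create[0] in the code) carries the codec type: 0x7, 0x10 and 0x11 (H.264, HEVC and VP9 per the comment) may decode on any VCN instance, while any other codec makes vcn_v3_0_limit_sched() pin the submitting entity to the first instance, AV1 being the practical case on these parts. Together with the shared sched_score now passed to amdgpu_ring_init(), this is what allows the old CHIP_SIENNA_CICHLID no_scheduler special cases to be dropped: decode jobs are load-balanced across both instances, except for codecs only the first instance can handle.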