| author | Dave Airlie <airlied@redhat.com> | 2017-04-07 05:41:42 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2017-04-07 05:49:12 +1000 |
| commit | 0168778115687486575a6831df865dbc4f5369fe (patch) | |
| tree | 29a3f1e3348f1ddbc9924611fa0cad738507ba64 /drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | |
| parent | aed93ee7d03eac9b7d21f08aebe8a7d9ea069e20 (diff) | |
| parent | f4e7c7c1b4ed4c28caf679bc94ca5aa096310c10 (diff) | |
Merge branch 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more things for 4.12:
- ttm and amdgpu support for non-contiguous vram CPU mappings
- lots of bug fixes and cleanups for vega10
- misc bug fixes and code cleanups
[airlied: fix do_div error on 32-bit arm, not sure it's 100% correct]
* 'drm-next-4.12' of git://people.freedesktop.org/~agd5f/linux: (58 commits)
drm/amdgpu: use uintptr_t instead of unsigned long to store pointer
drm/amdgpu: Avoid using signed integer to store pointer value
drm/amdgpu:invoke new implemented AI MB func
drm/amdgpu/vega10:timeout set to equal with VI
drm/amdgpu:implement the reset MB func for vega10
drm/amdgpu:fix typo for mxgpu_ai
drm/amdgpu:no need to involv HDP in KIQ
drm/amdgpu:add PSP block only load_type=PSP (v2)
drm/amdgpu/smu9: update to latest driver interface
drm/amd/amdgpu: cleanup gfx_v9_0_gpu_init()
drm/amd/amdgpu: cleanup gfx_v9_0_rlc_reset()
drm/amd/amdgpu: cleanup gfx_v9_0_rlc_start()
drm/amd/amdgpu: simplify gfx_v9_0_cp_gfx_enable()
drm/amd/amdgpu: cleanup gfx_v9_0_kiq_init_register()
drm/amd/amdgpu: Drop gfx_v9_0_print_status()
drm/amd/amdgpu: cleanup gfx_v9_0_set_gfx_eop_interrupt_state()
drm/amd/amdgpu: cleanup gfx_v9_0_set_priv_reg_fault_state()
drm/amd/amdgpu: cleanup gfx_v9_0_set_priv_inst_fault_state()
drm/amd/amdgpu: cleanup gfx_v9_0_init_queue()
drm/amdgpu: Move function amdgpu_has_atpx near other similar functions
...
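The do_div note in the merge message above refers to a classic 32-bit pitfall: a plain `/` on a u64 dividend makes the compiler emit a call to a libgcc helper (`__aeabi_uldivmod` on ARM) that the kernel does not provide, so 64-bit divisions must go through the do_div() macro instead. A minimal sketch of the pattern, using a hypothetical helper; this is not the actual fixup applied in this merge:

```c
#include <linux/types.h>
#include <asm/div64.h>

/*
 * Hypothetical example of the do_div() pattern; not code from this
 * merge.  Writing "time_ns / period_ns" directly with a u64 dividend
 * would break the 32-bit ARM build the merge message mentions.
 */
static inline u64 ticks_elapsed(u64 time_ns, u32 period_ns)
{
	u64 ticks = time_ns;

	/*
	 * do_div() divides its first argument in place and returns the
	 * remainder, so the quotient is left behind in "ticks".
	 */
	do_div(ticks, period_ns);
	return ticks;
}
```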
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 137 |

1 file changed, 135 insertions(+), 2 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cfd5e54777bb..1493301b6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -28,6 +28,7 @@
 #include "vega10/GC/gc_9_0_offset.h"
 #include "vega10/GC/gc_9_0_sh_mask.h"
 #include "soc15.h"
+#include "vega10_ih.h"
 #include "soc15_common.h"
 #include "mxgpu_ai.h"
 
@@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
 	return r;
 }
 
-static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 {
 	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
 
@@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 	if (req == IDH_REQ_GPU_INIT_ACCESS ||
 		req == IDH_REQ_GPU_FINI_ACCESS ||
 		req == IDH_REQ_GPU_RESET_ACCESS) {
-		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
 		if (r)
 			return r;
 	}
@@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
 static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
 					   bool init)
 {
@@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
 	return r;
 }
 
+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("get ack intr and do nothing.\n");
+	return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+				(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+	/* wait until RCV_MSG become 3 */
+	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+		pr_err("failed to recieve FLR_CMPL\n");
+		return;
+	}
+
+	/* Trigger recovery due to world switch failure */
+	amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *src,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+	return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *source,
+				   struct amdgpu_iv_entry *entry)
+{
+	int r;
+
+	/* see what event we get */
+	r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+	/* only handle FLR_NOTIFY now */
+	if (!r)
+		schedule_work(&adev->virt.flr_work);
+
+	return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_ack_irq,
+	.process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+	.set = xgpu_ai_set_mailbox_rcv_irq,
+	.process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->virt.ack_irq.num_types = 1;
+	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+	adev->virt.rcv_irq.num_types = 1;
+	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+	if (r)
+		return r;
+	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+	if (r) {
+		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+		return r;
+	}
+
+	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+	return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
 	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
+	.reset_gpu	= xgpu_ai_request_reset,
 };
```
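Taken together, the additions implement the Vega10 SR-IOV FLR (function level reset) path: the host writes a mailbox message, xgpu_ai_mailbox_rcv_irq() checks for IDH_FLR_NOTIFICATION and schedules flr_work, and xgpu_ai_mailbox_flr_work() waits for IDH_FLR_NOTIFICATION_CMPL before calling amdgpu_sriov_gpu_reset(). A hedged sketch of how an init path might wire these helpers up; the wrapper function below is illustrative and hypothetical, only the xgpu_ai_* calls come from this diff:

```c
/*
 * Illustrative wrapper only (hypothetical name): shows the intended
 * call order for the exported helpers added in this diff when the
 * device is running as an SR-IOV virtual function.
 */
static int example_vf_mailbox_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* Install the .set/.process callbacks for both IRQ sources. */
	xgpu_ai_mailbox_set_irq_funcs(adev);

	/* Register legacy IH source IDs 135 (rcv) and 138 (ack). */
	r = xgpu_ai_mailbox_add_irq_id(adev);
	if (r)
		return r;

	/* Enable both interrupts and INIT_WORK() the FLR handler. */
	return xgpu_ai_mailbox_get_irq(adev);
}
```

Teardown would mirror this by calling xgpu_ai_mailbox_put_irq() to drop both interrupt references.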