path: root/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
author	Dave Airlie <airlied@redhat.com>	2020-04-30 11:08:54 +1000
committer	Dave Airlie <airlied@redhat.com>	2020-04-30 11:08:54 +1000
commit	937eea297e26effac6809a0bf8c20e6ca9d90b9a (patch)
tree	5e2d4ddc284776b56355d36c8d2c5a757956b3d4 /drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
parent	126a34061eec2df05a5a28052edefd4e6125f31c (diff)
parent	e748f07d00c1c4a9106acafac52df7ea4ecf6264 (diff)
Merge tag 'amd-drm-next-5.8-2020-04-24' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.8-2020-04-24:

amdgpu:
- Documentation improvements
- Enable FRU chip access on boards that support it
- RAS updates
- SR-IOV updates
- Powerplay locking fixes for older SMU versions
- VCN DPG (dynamic powergating) cleanup
- VCN 2.5 DPG enablement
- Rework GPU scheduler handling
- Improve scheduler priority handling
- Add SPM (streaming performance monitor) golden settings for navi
- GFX10 clockgating fixes
- DC ABM (automatic backlight modulation) fixes
- DC cursor and plane fixes
- DC watermark fixes
- DC clock handling fixes
- DC color management fixes
- GPU reset fixes
- Clean up MMIO access macros
- EEPROM access fixes
- Misc code cleanups

amdkfd:
- Misc code cleanups

radeon:
- Clean up safe reg list generation
- Misc code cleanups

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200424190827.4542-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c	97
1 file changed, 60 insertions, 37 deletions
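The hunks below drop the per-access SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_*) composition (and the mxgpu_ai.h include that came with it) in favor of flat mmMAILBOX_* constants. As a rough sketch of how such macros are presumably defined in mxgpu_nv.h — the names match the diff, but the offset values here are placeholders, not the real hardware offsets:

	/* Hypothetical sketch; the real offsets live in mxgpu_nv.h. */
	#define mmMAILBOX_MSGBUF_TRN_DW0	0x0001	/* placeholder */
	#define mmMAILBOX_MSGBUF_TRN_DW1	0x0002	/* placeholder */
	#define mmMAILBOX_MSGBUF_TRN_DW2	0x0003	/* placeholder */
	#define mmMAILBOX_MSGBUF_TRN_DW3	0x0004	/* placeholder */
	#define mmMAILBOX_MSGBUF_RCV_DW0	0x0005	/* placeholder */
	#define mmMAILBOX_MSGBUF_RCV_DW1	0x0006	/* placeholder */
	#define mmMAILBOX_MSGBUF_RCV_DW2	0x0007	/* placeholder */
	#define mmMAILBOX_INT_CNTL		0x0008	/* placeholder */

With the offsets baked into one header, each RREG32_NO_KIQ/WREG32_NO_KIQ call becomes a single MMIO access at a compile-time offset instead of a lookup through the SOC15 register maps.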
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 237fa5e16b7c..ce2bf1fb79ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -30,7 +30,6 @@
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
*/
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
- return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
{
u32 reg;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
return -ENOENT;
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
timeout -= 10;
} while (timeout > 1);
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- u32 reg;
int r;
uint8_t trn;
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
}
} while (trn);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
- reg);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
- data1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
- data2);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
- data3);
-
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
xgpu_nv_mailbox_set_valid(adev, true);
/* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
+ enum idh_event event = -1;
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
- /* start to check msg if request is idh_req_gpu_init_access */
- if (req == IDH_REQ_GPU_INIT_ACCESS ||
- req == IDH_REQ_GPU_FINI_ACCESS ||
- req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ switch (req) {
+ case IDH_REQ_GPU_INIT_ACCESS:
+ case IDH_REQ_GPU_FINI_ACCESS:
+ case IDH_REQ_GPU_RESET_ACCESS:
+ event = IDH_READY_TO_ACCESS_GPU;
+ break;
+ case IDH_REQ_GPU_INIT_DATA:
+ event = IDH_REQ_GPU_INIT_DATA_READY;
+ break;
+ default:
+ break;
+ }
+
+ if (event != -1) {
+ r = xgpu_nv_poll_msg(adev, event);
if (r) {
- pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
- return r;
+ if (req != IDH_REQ_GPU_INIT_DATA) {
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+ return r;
+ }
+ else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+ adev->virt.req_init_data_ver = 0;
+ } else {
+ if (req == IDH_REQ_GPU_INIT_DATA)
+ {
+ adev->virt.req_init_data_ver =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+ /* assume V1 in case host doesn't set version number */
+ if (adev->virt.req_init_data_ver < 1)
+ adev->virt.req_init_data_ver = 1;
+ }
}
+
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
- RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
}
}
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 2;
+ else
+ tmp &= ~2;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ tmp |= 1;
+ else
+ tmp &= ~1;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
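For readers following the new IDH_REQ_GPU_INIT_DATA path: below is a condensed sketch of the handshake that xgpu_nv_send_access_requests() now implements, using only identifiers visible in the diff above. It illustrates the control flow; it is not a copy of the final kernel code.

	/* Sketch: guest asks the host for init data and negotiates a version. */
	static int request_init_data_sketch(struct amdgpu_device *adev)
	{
		xgpu_nv_mailbox_trans_msg(adev, IDH_REQ_GPU_INIT_DATA, 0, 0, 0);

		if (xgpu_nv_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY)) {
			/* Host never answered: it predates the handshake. */
			adev->virt.req_init_data_ver = 0;
			return 0;
		}

		/* Host replied; the version number arrives in RCV_DW1. */
		adev->virt.req_init_data_ver =
			RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

		/* Assume V1 if the host left the version field unset. */
		if (adev->virt.req_init_data_ver < 1)
			adev->virt.req_init_data_ver = 1;

		return 0;
	}

Note the deliberate asymmetry with the IDH_REQ_GPU_*_ACCESS requests: there a poll timeout is a hard error that aborts the request, while here it simply means the host doesn't support the REQ_GPU_INIT_DATA handshake, so the driver records version 0 and falls back to the legacy init path.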