Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29
1 file changed, 17 insertions, 12 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3d540b0cf0e1..ca56b5a543b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1003,8 +1003,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 
 	amdgpu_asic_pre_asic_init(adev);
 
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
-	    adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
 		amdgpu_psp_wait_for_bootloader(adev);
 		ret = amdgpu_atomfirmware_asic_init(adev, true);
 		return ret;
@@ -2845,7 +2845,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
 		return;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -3098,8 +3098,10 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 
 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
 		if (adev->in_s0ix &&
-		    (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
-		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
+		     IP_VERSION(5, 0, 0)) &&
+		    (adev->ip_blocks[i].version->type ==
+		     AMD_IP_BLOCK_TYPE_SDMA))
 			continue;
 
 		/* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot.
@@ -3590,8 +3592,8 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 		adev->gfx.mcbp = true;
 	else if (amdgpu_mcbp == 0)
 		adev->gfx.mcbp = false;
-	else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
-		 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+	else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
+		 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
 		 adev->gfx.num_gfx_rings)
 		adev->gfx.mcbp = true;
 
@@ -3811,7 +3813,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	 * internal path natively support atomics, set have_atomics_support to true.
 	 */
 	} else if ((adev->flags & AMD_IS_APU) &&
-		   (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
+		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
+		    IP_VERSION(9, 0, 0))) {
 		adev->have_atomics_support = true;
 	} else {
 		adev->have_atomics_support =
@@ -5444,8 +5447,9 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 				adev->asic_reset_res = r;
 
 			/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
-			if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
-			    adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
+			if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+				    IP_VERSION(9, 4, 2) ||
+			    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
 				amdgpu_ras_resume(adev);
 		} else {
 			r = amdgpu_do_asic_reset(device_list_handle, reset_context);
@@ -5470,7 +5474,8 @@ skip_hw_reset:
 			drm_sched_start(&ring->sched, true);
 		}
 
-		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
+		if (adev->enable_mes &&
+		    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
 			amdgpu_mes_self_test(tmp_adev);
 
 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -6147,7 +6152,7 @@ bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
 		return true;
 	default:
 		/* IP discovery */
-		if (!adev->ip_versions[DCE_HWIP][0] ||
+		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
 			return false;
 		return true;
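
For reference, every removed expression of the form adev->ip_versions[HWIP][inst] is replaced by a call to the amdgpu_ip_version() accessor. A minimal sketch of that helper, assuming a static-inline definition equivalent to the one in amdgpu.h (the exact location and the IP_VERSION packing shown here are assumptions based on the driver's conventions, not part of this patch):

/*
 * Assumed shape of the accessor used by this patch: it reads the same
 * IP-discovery version table as the open-coded array accesses it
 * replaces, but funnels every lookup through one helper so future
 * changes (e.g. per-instance handling) touch a single place.
 */
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
					 uint8_t hwip, uint8_t inst)
{
	return adev->ip_versions[hwip][inst];
}

/*
 * Assumed packing of IP_VERSION(): major/minor/revision folded into one
 * integer with major in the most significant bits, so the ordered
 * comparisons above (>=, <, >) follow plain integer ordering.
 */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

Because the helper is a trivial static inline, the conversion is behavior-neutral: call sites such as amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) compile to the same table read and integer compare as before.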