author    Dave Airlie <airlied@redhat.com>    2020-03-13 09:09:11 +1000
committer Dave Airlie <airlied@redhat.com>    2020-03-13 09:09:11 +1000
commit    69ddce0970d9d1de63bed9c24eefa0814db29a5a (patch)
tree      2e64e14ab5ad2448cb60dcc77a34966dfaa157ee /drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
parent    9e12da086e5e38f7b4f8d6e02a82447f4165fbee (diff)
parent    5d11e37c021f925496a3a3c019cadf69435f65ed (diff)
Merge tag 'amd-drm-next-5.7-2020-03-10' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.7-2020-03-10:

amdgpu:
- SR-IOV fixes
- Fix up fallout from drm load/unload callback removal
- Navi, renoir power management watermark fixes
- Refactor smu parameter handling
- Display FEC fixes
- Display DCC fixes
- HDCP fixes
- Add support for USB-C PD firmware updates
- Pollock detection fix
- Rework compute ring priority handling
- RAS fixes
- Misc cleanups

amdkfd:
- Consolidate more gfx config details in amdgpu
- Consolidate bo alloc flags
- Improve code comments
- SDMA MQD fixes
- Misc cleanups

gpu scheduler:
- Add support for modifying the sched list

uapi:
- Clarify comments about GEM_CREATE flags that are not used by userspace. The kernel driver has always prevented userspace from using these; they are only used internally in the kernel driver.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310212748.4519-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 7403588684b3..6b9c9193cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 	return adev->gfx.mec.num_mec > 1;
 }
 
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+					       int queue)
+{
+	/* Policy: make queue 0 of each pipe as high priority compute queue */
+	return (queue == 0);
+
+}
+
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
 	int i, queue, pipe, mec;
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
 	int r;
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "gfx_err_count",
-		.debugfs_name = "gfx_err_inject",
 	};
 	struct ras_ih_if ih_info = {
 		.cb = amdgpu_gfx_process_ras_data_cb,
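
The first hunk above introduces amdgpu_gfx_is_high_priority_compute_queue(), whose whole policy is that queue 0 of each compute pipe is treated as high priority. Below is a minimal, standalone userspace sketch (not driver code) of that selection rule applied across a hypothetical mec/pipe/queue layout; the counts and the helper name are placeholders chosen for illustration, not values or symbols taken from the driver.

/*
 * Standalone sketch: applies the "queue 0 of each pipe is high priority"
 * policy from the hunk above to a made-up compute queue layout.
 * The mec/pipe/queue counts are illustrative placeholders only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_high_priority_compute_queue(int queue)
{
	/* Same rule as the added driver helper: only queue 0 qualifies. */
	return queue == 0;
}

int main(void)
{
	const int num_mec = 2;            /* placeholder count */
	const int num_pipe_per_mec = 4;   /* placeholder count */
	const int num_queue_per_pipe = 8; /* placeholder count */
	int mec, pipe, queue;

	for (mec = 0; mec < num_mec; mec++)
		for (pipe = 0; pipe < num_pipe_per_mec; pipe++)
			for (queue = 0; queue < num_queue_per_pipe; queue++)
				printf("mec %d pipe %d queue %d -> %s priority\n",
				       mec, pipe, queue,
				       is_high_priority_compute_queue(queue) ?
				       "high" : "normal");
	return 0;
}

In the driver itself, a predicate like this would presumably be consulted when compute rings are set up so that the queue-0 rings receive elevated priority; the sketch only demonstrates the selection rule the hunk encodes.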