author		Gang Ba <gaba@amd.com>	2019-08-23 16:01:11 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2019-08-27 08:16:26 -0500
commit		250af743c044c342916a36517187e6e42a281efe (patch)
tree		cc8be4a439d30ce4e54d6cda4f1ffc956e0cf80c /drivers/gpu/drm
parent		da26ded3b2fff646d28559004195abe353bce49b (diff)
Revert "drm/amdgpu: free up the first paging queue v2"
This reverts commit 4f8bc72fbf10f2dc8bca74d5da08b3a981b2e5cd.

It turned out that a single reserved queue wouldn't be sufficient for page fault handling.

Signed-off-by: Gang Ba <gaba@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c	| 23
1 file changed, 8 insertions(+), 15 deletions(-)
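
Before the diff itself, here is a minimal, stand-alone C sketch of the queue selection this revert restores, based only on the '+' lines below: buffer moves go back to SDMA instance 0's page queue when a page queue exists, and every instance's page queue (or its gfx ring otherwise) feeds one VM PTE run queue. All structure and field names are simplified, illustrative stand-ins, not the real amdgpu definitions; the real driver stores the kernel-priority run queue of each ring's scheduler rather than the ring itself.

#include <stdbool.h>
#include <stdio.h>

#define NUM_SDMA 2

/* Illustrative stand-ins for the amdgpu ring/scheduler structures. */
struct queue { const char *name; };
struct sdma_inst { struct queue gfx, page; };

struct dev {
	bool has_page_queue;
	unsigned num_instances;
	struct sdma_inst inst[NUM_SDMA];
	struct queue *vm_pte_rqs[NUM_SDMA];
	unsigned vm_pte_num_rqs;
	struct queue *buffer_funcs_ring;
};

/* Mirrors the post-revert sdma_v4_0_set_buffer_funcs(): buffer moves use
 * instance 0's page queue when present, otherwise instance 0's gfx ring. */
static void set_buffer_funcs(struct dev *d)
{
	d->buffer_funcs_ring = d->has_page_queue ? &d->inst[0].page
						 : &d->inst[0].gfx;
}

/* Mirrors the post-revert sdma_v4_0_set_vm_pte_funcs(): every instance
 * contributes one PTE-update queue, taken from its page queue when available. */
static void set_vm_pte_funcs(struct dev *d)
{
	unsigned i;

	for (i = 0; i < d->num_instances; i++)
		d->vm_pte_rqs[i] = d->has_page_queue ? &d->inst[i].page
						     : &d->inst[i].gfx;
	d->vm_pte_num_rqs = d->num_instances;
}

int main(void)
{
	struct dev d = {
		.has_page_queue = true,
		.num_instances = NUM_SDMA,
		.inst = { { { "sdma0 gfx" }, { "sdma0 page" } },
			  { { "sdma1 gfx" }, { "sdma1 page" } } },
	};
	unsigned i;

	set_buffer_funcs(&d);
	set_vm_pte_funcs(&d);

	printf("buffer funcs ring: %s\n", d.buffer_funcs_ring->name);
	for (i = 0; i < d.vm_pte_num_rqs; i++)
		printf("VM PTE rq %u: %s\n", i, d.vm_pte_rqs[i]->name);
	return 0;
}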
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9b7e660828c4..1f7c6c2d6e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2503,8 +2503,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
-	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
-		adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
+	if (adev->sdma.has_page_queue)
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
 	else
 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 }
@@ -2523,22 +2523,15 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
-	if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
-		for (i = 1; i < adev->sdma.num_instances; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
 			sched = &adev->sdma.instance[i].page.sched;
-			adev->vm_manager.vm_pte_rqs[i - 1] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
-		adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
-	} else {
-		for (i = 0; i < adev->sdma.num_instances; i++) {
+		else
 			sched = &adev->sdma.instance[i].ring.sched;
-			adev->vm_manager.vm_pte_rqs[i] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+		adev->vm_manager.vm_pte_rqs[i] =
+			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
+	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
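
For contrast, the removed lines above implemented the arrangement being undone: with a page queue and more than one SDMA instance, instance 0's page queue was set aside as adev->vm_manager.page_fault and only instances 1..N-1 fed the PTE-update run queues. A hedged, stand-alone sketch of that reserved-queue layout, again using simplified, illustrative types rather than the real amdgpu structures:

#include <stdio.h>

#define NUM_SDMA 2

/* Illustrative stand-ins only; not the real amdgpu structures. */
struct queue { const char *name; };
struct sdma_inst { struct queue gfx, page; };

struct vm_mgr {
	struct queue *vm_pte_rqs[NUM_SDMA];
	unsigned vm_pte_num_rqs;
	struct queue *page_fault;	/* dedicated page-fault queue (pre-revert only) */
};

/* Sketch of the layout the reverted commit used when a page queue exists and
 * num_instances > 1: instance 0's page queue is reserved for page faults and
 * instances 1..N-1 handle the PTE updates. */
static void reserved_queue_layout(struct sdma_inst *inst, unsigned n,
				  struct vm_mgr *vm)
{
	unsigned i;

	for (i = 1; i < n; i++)
		vm->vm_pte_rqs[i - 1] = &inst[i].page;
	vm->vm_pte_num_rqs = n - 1;
	vm->page_fault = &inst[0].page;
}

int main(void)
{
	struct sdma_inst inst[NUM_SDMA] = {
		{ { "sdma0 gfx" }, { "sdma0 page" } },
		{ { "sdma1 gfx" }, { "sdma1 page" } },
	};
	struct vm_mgr vm = { { NULL } };
	unsigned i;

	reserved_queue_layout(inst, NUM_SDMA, &vm);
	printf("reserved page-fault queue: %s\n", vm.page_fault->name);
	for (i = 0; i < vm.vm_pte_num_rqs; i++)
		printf("VM PTE rq %u: %s\n", i, vm.vm_pte_rqs[i]->name);
	return 0;
}

Per the commit message, this single reserved queue proved insufficient for page fault handling, which is why the revert returns to spreading PTE updates across all instances.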