-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  3
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  7
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c  | 66
3 files changed, 53 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 317d2d468a6b..8af3f0fd3073 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -988,6 +988,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	params.unlocked = unlocked;
 	params.needs_flush = flush_tlb;
 	params.allow_override = allow_override;
+	INIT_LIST_HEAD(&params.tlb_flush_waitlist);
 
 	/* Implicitly sync to command submissions in the same VM before
 	 * unmapping. Sync to moving fences before mapping.
@@ -1078,6 +1079,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		tlb_cb = NULL;
 	}
 
+	amdgpu_vm_pt_free_list(adev, &params);
+
 error_free:
 	kfree(tlb_cb);
 	amdgpu_vm_eviction_unlock(vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b0a4fe683352..54d7da396de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -266,6 +266,11 @@ struct amdgpu_vm_update_params {
 	 * to be overridden for NUMA local memory.
 	 */
 	bool allow_override;
+
+	/**
+	 * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
+	 */
+	struct list_head tlb_flush_waitlist;
 };
 
 struct amdgpu_vm_update_funcs {
@@ -547,6 +552,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
 			  uint64_t start, uint64_t end,
 			  uint64_t dst, uint64_t flags);
 void amdgpu_vm_pt_free_work(struct work_struct *work);
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+			    struct amdgpu_vm_update_params *params);
 
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 601df0ce8290..cbe0ae4c8738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -622,40 +622,58 @@ void amdgpu_vm_pt_free_work(struct work_struct *work)
 }
 
 /**
- * amdgpu_vm_pt_free_dfs - free PD/PT levels
+ * amdgpu_vm_pt_free_list - free PD/PT levels
  *
  * @adev: amdgpu device structure
- * @vm: amdgpu vm structure
- * @start: optional cursor where to start freeing PDs/PTs
- * @unlocked: vm resv unlock status
+ * @params: see amdgpu_vm_update_params definition
  *
- * Free the page directory or page table level and all sub levels.
+ * Free the page directory objects saved in the flush list
  */
-static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm,
-				  struct amdgpu_vm_pt_cursor *start,
-				  bool unlocked)
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+			    struct amdgpu_vm_update_params *params)
 {
-	struct amdgpu_vm_pt_cursor cursor;
-	struct amdgpu_vm_bo_base *entry;
+	struct amdgpu_vm_bo_base *entry, *next;
+	struct amdgpu_vm *vm = params->vm;
+	bool unlocked = params->unlocked;
+
+	if (list_empty(&params->tlb_flush_waitlist))
+		return;
 
 	if (unlocked) {
 		spin_lock(&vm->status_lock);
-		for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
-			list_move(&entry->vm_status, &vm->pt_freed);
-
-		if (start)
-			list_move(&start->entry->vm_status, &vm->pt_freed);
+		list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
 		spin_unlock(&vm->status_lock);
 		schedule_work(&vm->pt_free_work);
 		return;
 	}
 
-	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+	list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
 		amdgpu_vm_pt_free(entry);
+}
 
-	if (start)
-		amdgpu_vm_pt_free(start->entry);
+/**
+ * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
+ *
+ * @params: parameters for the update
+ * @cursor: first PT entry to start DF search from, non NULL
+ *
+ * This list will be freed after TLB flush.
+ */
+static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
+				  struct amdgpu_vm_pt_cursor *cursor)
+{
+	struct amdgpu_vm_pt_cursor seek;
+	struct amdgpu_vm_bo_base *entry;
+
+	spin_lock(&params->vm->status_lock);
+	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
+		if (entry && entry->bo)
+			list_move(&entry->vm_status, &params->tlb_flush_waitlist);
+	}
+
+	/* enter start node now */
+	list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
+	spin_unlock(&params->vm->status_lock);
 }
 
 /**
@@ -667,7 +685,11 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
  */
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
+	struct amdgpu_vm_pt_cursor cursor;
+	struct amdgpu_vm_bo_base *entry;
+
+	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
+		amdgpu_vm_pt_free(entry);
 }
 
 /**
@@ -973,9 +995,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
 			/* Make sure previous mapping is freed */
 			if (cursor.entry->bo) {
 				params->needs_flush = true;
-				amdgpu_vm_pt_free_dfs(adev, params->vm,
-						      &cursor,
-						      params->unlocked);
+				amdgpu_vm_pt_add_list(params, &cursor);
 			}
 			amdgpu_vm_pt_next(adev, &cursor);
 		}
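The shape of the change: instead of freeing page-directory/page-table objects immediately while amdgpu_vm_ptes_update() walks the range, the patch parks them on params->tlb_flush_waitlist via amdgpu_vm_pt_add_list() and only releases them in amdgpu_vm_pt_free_list() once amdgpu_vm_update_range() has dealt with the TLB flush. The toy program below is a minimal userspace sketch of that ordering only; pt_entry, update_params, add_to_waitlist, free_waitlist and tlb_flush are made-up names, and the plain singly linked list stands in for the kernel's struct list_head, vm->status_lock and the pt_free_work worker.

/*
 * Simplified model of "defer PT frees until after the TLB flush".
 * All names here are illustrative stand-ins, not the amdgpu API.
 */
#include <stdio.h>
#include <stdlib.h>

struct pt_entry {
	int level;              /* which PD/PT level this object backs */
	struct pt_entry *next;  /* waitlist link */
};

struct update_params {
	struct pt_entry *tlb_flush_waitlist;  /* entries to free after flush */
};

/* Counterpart of amdgpu_vm_pt_add_list(): park the entry instead of freeing it. */
static void add_to_waitlist(struct update_params *p, struct pt_entry *e)
{
	e->next = p->tlb_flush_waitlist;
	p->tlb_flush_waitlist = e;
}

static void tlb_flush(void)
{
	/* Stand-in for issuing/queueing the GPU TLB flush. */
	printf("TLB flush issued\n");
}

/* Counterpart of amdgpu_vm_pt_free_list(): only now release the objects. */
static void free_waitlist(struct update_params *p)
{
	while (p->tlb_flush_waitlist) {
		struct pt_entry *e = p->tlb_flush_waitlist;

		p->tlb_flush_waitlist = e->next;
		printf("freeing PT level %d\n", e->level);
		free(e);
	}
}

int main(void)
{
	struct update_params params = { .tlb_flush_waitlist = NULL };

	/* During the range update, stale page tables are collected... */
	for (int i = 0; i < 3; i++) {
		struct pt_entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->level = i;
		add_to_waitlist(&params, e);
	}

	/* ...and only freed once the TLB flush has been taken care of. */
	tlb_flush();
	free_waitlist(&params);
	return 0;
}

The sketch deliberately omits the locked/unlocked split in the real amdgpu_vm_pt_free_list() (direct free versus splicing onto vm->pt_freed and scheduling pt_free_work); it only shows the ordering the patch establishes, namely that the page-table objects outlive the update until the flush has been handled.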