Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 167
1 file changed, 69 insertions(+), 98 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 078c068937fe..6b15cad78de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -88,6 +88,46 @@ struct amdgpu_prt_cb {
 	struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
+ * @pasid: the pasid the VM is using on this GPU
+ *
+ * Set the pasid this VM is using on this GPU, can also be used to remove the
+ * pasid by passing in zero.
+ *
+ */
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			u32 pasid)
+{
+	int r;
+
+	if (vm->pasid == pasid)
+		return 0;
+
+	if (vm->pasid) {
+		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
+		if (r < 0)
+			return r;
+
+		vm->pasid = 0;
+	}
+
+	if (pasid) {
+		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
+					GFP_KERNEL));
+		if (r < 0)
+			return r;
+
+		vm->pasid = pasid;
+	}
+
+
+	return 0;
+}
+
 /*
  * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
  * happens while holding this lock anywhere to prevent deadlocks when
@@ -886,7 +926,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	bp.size = amdgpu_vm_bo_size(adev, level);
 	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-	bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
+	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -1178,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
 	if (vm_flush_needed || pasid_mapping_needed) {
-		r = amdgpu_fence_emit(ring, &fence, 0);
+		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 		if (r)
 			return r;
 	}
@@ -1758,7 +1798,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	r = vm->update_funcs->commit(&params, fence);
 
 	if (table_freed)
-		*table_freed = params.table_freed;
+		*table_freed = *table_freed || params.table_freed;
 
 error_unlock:
 	amdgpu_vm_eviction_unlock(vm);
@@ -1816,6 +1856,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
  * @clear: if true clear the entries
+ * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1823,7 +1864,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-			bool clear)
+			bool clear, bool *table_freed)
 {
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1902,7 +1943,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 						resv, mapping->start,
 						mapping->last, update_flags,
 						mapping->offset, mem,
-						pages_addr, last_update, NULL);
+						pages_addr, last_update, table_freed);
 		if (r)
 			return r;
 	}
@@ -2154,7 +2195,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 		/* Per VM BOs never need to bo cleared in the page tables */
-		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 		if (r)
 			return r;
 	}
@@ -2173,7 +2214,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		else
 			clear = true;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, clear);
+		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
 		if (r)
 			return r;
 
@@ -2863,14 +2904,13 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @pasid: Process address space identifier
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo *root_bo;
 	struct amdgpu_bo_vm *root;
@@ -2944,19 +2984,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	amdgpu_bo_unreserve(vm->root.bo);
 
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-		if (r < 0)
-			goto error_free_root;
-
-		vm->pasid = pasid;
-	}
-
 	INIT_KFIFO(vm->faults);
 
 	return 0;
@@ -3012,7 +3039,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @pasid: pasid to use
  *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
@@ -3020,7 +3046,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
  * Changes the following VM parameters:
  * - use_cpu_for_update
  * - pte_supports_ats
- * - pasid (old PASID is released, because compute manages its own PASIDs)
  *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
@@ -3028,8 +3053,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
  * Returns:
  * 0 for success, -errno for errors.
  */
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   u32 pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
 	int r;
@@ -3043,19 +3067,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto unreserve_bo;
 
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-		if (r == -ENOSPC)
-			goto unreserve_bo;
-		r = 0;
-	}
-
 	/* Check if PD needs to be reinitialized and do it before
 	 * changing any other state, in case it fails.
 	 */
@@ -3065,7 +3076,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				       to_amdgpu_bo_vm(vm->root.bo),
 				       false);
 		if (r)
-			goto free_idr;
+			goto unreserve_bo;
 	}
 
 	/* Update VM state */
@@ -3082,7 +3093,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		r = amdgpu_bo_sync_wait(vm->root.bo,
 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
 		if (r)
-			goto free_idr;
+			goto unreserve_bo;
 
 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 	} else {
@@ -3092,36 +3103,11 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->last_update = NULL;
 	vm->is_compute_context = true;
 
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
-		/* Free the original amdgpu allocated pasid
-		 * Will be replaced with kfd allocated pasid
-		 */
-		amdgpu_pasid_free(vm->pasid);
-		vm->pasid = 0;
-	}
-
 	/* Free the shadow bo for compute VM */
 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
 
-	if (pasid)
-		vm->pasid = pasid;
-
 	goto unreserve_bo;
 
-free_idr:
-	if (pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-	}
 unreserve_bo:
 	amdgpu_bo_unreserve(vm->root.bo);
 	return r;
@@ -3137,14 +3123,7 @@ unreserve_bo:
  */
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-	}
-	vm->pasid = 0;
+	amdgpu_vm_set_pasid(adev, vm, 0);
 	vm->is_compute_context = false;
 }
 
@@ -3168,15 +3147,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	root = amdgpu_bo_ref(vm->root.bo);
 	amdgpu_bo_reserve(root, true);
 
-	if (vm->pasid) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
-		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-		vm->pasid = 0;
-	}
-
+	amdgpu_vm_set_pasid(adev, vm, 0);
 	dma_fence_wait(vm->last_unlocked, false);
 	dma_fence_put(vm->last_unlocked);
 
@@ -3258,8 +3229,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	adev->vm_manager.vm_update_mode = 0;
 #endif
 
-	idr_init(&adev->vm_manager.pasid_idr);
-	spin_lock_init(&adev->vm_manager.pasid_lock);
+	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
 }
 
 /**
@@ -3271,8 +3241,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
-	idr_destroy(&adev->vm_manager.pasid_idr);
+	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
+	xa_destroy(&adev->vm_manager.pasids);
 
 	amdgpu_vmid_mgr_fini(adev);
 }
@@ -3341,13 +3311,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
 	struct amdgpu_vm *vm;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
 
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
 /**
@@ -3375,12 +3345,13 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
  * @adev: amdgpu device pointer
  * @pasid: PASID of the VM
  * @addr: Address of the fault
+ * @write_fault: true is write fault, false is read fault
  *
 * Try to gracefully handle a VM fault. Return true if the fault was handled and
 * shouldn't be reported any more.
 */
 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
-			    uint64_t addr)
+			    uint64_t addr, bool write_fault)
 {
 	bool is_compute_context = false;
 	struct amdgpu_bo *root;
@@ -3389,15 +3360,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	struct amdgpu_vm *vm;
 	int r;
 
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm) {
 		root = amdgpu_bo_ref(vm->root.bo);
 		is_compute_context = vm->is_compute_context;
 	} else {
 		root = NULL;
 	}
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 
 	if (!root)
 		return false;
@@ -3405,7 +3376,7 @@
 	addr /= AMDGPU_GPU_PAGE_SIZE;
 
 	if (is_compute_context &&
-	    !svm_range_restore_pages(adev, pasid, addr)) {
+	    !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
 		amdgpu_bo_unref(&root);
 		return true;
 	}
@@ -3415,11 +3386,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		goto error_unref;
 
 	/* Double check that the VM still exists */
-	spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
-	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+	vm = xa_load(&adev->vm_manager.pasids, pasid);
 	if (vm && vm->root.bo != root)
 		vm = NULL;
-	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
+	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
 	if (!vm)
 		goto error_unlock;
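
The centerpiece of this change is the new amdgpu_vm_set_pasid() helper together with the switch from an IDR guarded by an external spinlock to an xarray that carries its own IRQ-safe lock (XA_FLAGS_LOCK_IRQ). Below is a minimal, self-contained sketch of the same pattern; all demo_* names are hypothetical stand-ins for the amdgpu structures, while the <linux/xarray.h> calls are the real kernel API.

/*
 * Sketch of the pasid <-> vm mapping pattern used by amdgpu_vm_set_pasid()
 * above. demo_* names are hypothetical; the xarray calls are real.
 */
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_vm {
	u32 pasid;	/* 0 means "no pasid assigned" */
};

/* The xarray supplies its own IRQ-safe spinlock. */
static DEFINE_XARRAY_FLAGS(demo_pasids, XA_FLAGS_LOCK_IRQ);

static int demo_set_pasid(struct demo_vm *vm, u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;	/* nothing to do */

	if (vm->pasid) {
		/* Drop the old mapping before installing a new one. */
		r = xa_err(xa_erase_irq(&demo_pasids, vm->pasid));
		if (r < 0)
			return r;
		vm->pasid = 0;
	}

	if (pasid) {
		/* May allocate internal nodes, hence GFP_KERNEL. */
		r = xa_err(xa_store_irq(&demo_pasids, pasid, vm, GFP_KERNEL));
		if (r < 0)
			return r;
		vm->pasid = pasid;
	}

	return 0;
}

/* Reader side: take the same internal lock the _irq helpers use. */
static struct demo_vm *demo_lookup_pasid(u32 pasid)
{
	struct demo_vm *vm;
	unsigned long flags;

	xa_lock_irqsave(&demo_pasids, flags);
	vm = xa_load(&demo_pasids, pasid);
	xa_unlock_irqrestore(&demo_pasids, flags);
	return vm;
}

Because the xarray is initialized with XA_FLAGS_LOCK_IRQ, xa_store_irq() and xa_erase_irq() disable interrupts around the internal lock, which is what lets the fault handler do lookups from interrupt context without the separate pasid_lock the IDR needed.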

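The fault path also shows why the mapping alone is not enough: amdgpu_vm_handle_fault() cannot hold the xarray lock across the page-table fix-up, so it pins the root BO under the lock, drops the lock, does the work, and then double-checks that the PASID still resolves to the same VM. A condensed sketch of that lookup/pin/revalidate pattern follows, again with hypothetical demo_* names; only the kref and xarray calls are the real kernel API.

/*
 * Sketch of the "lookup, pin, drop lock, revalidate" pattern from
 * amdgpu_vm_handle_fault() above. demo_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_root {
	struct kref ref;	/* stands in for the root BO refcount */
};

struct demo_vm {
	struct demo_root *root;
};

static DEFINE_XARRAY_FLAGS(demo_pasids, XA_FLAGS_LOCK_IRQ);

static void demo_root_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_root, ref));
}

static bool demo_handle_fault(u32 pasid)
{
	struct demo_root *root = NULL;
	struct demo_vm *vm;
	unsigned long flags;
	bool handled = false;

	/* 1. Look the VM up and pin something that outlives the lock. */
	xa_lock_irqsave(&demo_pasids, flags);
	vm = xa_load(&demo_pasids, pasid);
	if (vm) {
		root = vm->root;
		kref_get(&root->ref);
	}
	xa_unlock_irqrestore(&demo_pasids, flags);
	if (!root)
		return false;

	/*
	 * 2. Slow work (reserve the root, fix up page tables, ...).
	 *    The xarray lock is NOT held here, so the VM may be torn
	 *    down concurrently; only the pinned root is guaranteed.
	 */

	/* 3. Revalidate: the pasid must still map to the same VM. */
	xa_lock_irqsave(&demo_pasids, flags);
	vm = xa_load(&demo_pasids, pasid);
	if (vm && vm->root == root)
		handled = true;	/* safe to treat the fault as handled */
	xa_unlock_irqrestore(&demo_pasids, flags);

	kref_put(&root->ref, demo_root_release);
	return handled;
}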