Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	81
1 file changed, 67 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857ec1021..07e02c4bf5a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->glob->bo_count);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
-
+	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -429,8 +429,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret)
+	if (!ret) {
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_add_to_lru(bo);
+		}
+
 		ww_mutex_unlock(&bo->resv->lock);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@ out_unlock:
 	return ret;
 }
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 			bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 					 no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
@@ -1103,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1684,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 		;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+	int ret;
+
+	/*
+	 * In the absence of a wait_unlocked API,
+	 * use the bo::wu_mutex to avoid triggering livelocks due to
+	 * concurrent use of this function. Note that this use of
+	 * bo::wu_mutex can go away if we change locking order to
+	 * mmap_sem -> bo::reserve.
+	 */
+	ret = mutex_lock_interruptible(&bo->wu_mutex);
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+	if (!ww_mutex_is_locked(&bo->resv->lock))
+		goto out_unlock;
+	ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+	ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+	mutex_unlock(&bo->wu_mutex);
+	return ret;
+}
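The ttm_bo_mem_compat() hunk above changes the contract from "return the matching placement index, or -1" to "return bool and report the matched flags through *new_flags", and adds a second pass over busy_placement as a fallback. Below is a minimal standalone C sketch of that shape, not kernel code: the mask values, the struct layout, and the name pick_placement() are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_CACHING 0x0000ffffu	/* stand-in for TTM_PL_MASK_CACHING */
#define MASK_MEM     0xffff0000u	/* stand-in for TTM_PL_MASK_MEM */

struct placement_list {
	const uint32_t *placement;	/* preferred placements, in order */
	unsigned int num_placement;
	const uint32_t *busy_placement;	/* fallbacks under memory pressure */
	unsigned int num_busy_placement;
};

/* Same control flow as the patched ttm_bo_mem_compat(): scan the preferred
 * placements, then the busy list; on a hit, report the matching flags
 * through *new_flags and return true. */
static bool pick_placement(const struct placement_list *pl,
			   uint32_t cur_flags, uint32_t *new_flags)
{
	unsigned int i;

	for (i = 0; i < pl->num_placement; i++) {
		*new_flags = pl->placement[i];
		if ((*new_flags & cur_flags & MASK_CACHING) &&
		    (*new_flags & cur_flags & MASK_MEM))
			return true;
	}

	for (i = 0; i < pl->num_busy_placement; i++) {
		*new_flags = pl->busy_placement[i];
		if ((*new_flags & cur_flags & MASK_CACHING) &&
		    (*new_flags & cur_flags & MASK_MEM))
			return true;
	}

	return false;
}

int main(void)
{
	const uint32_t preferred[] = { 0x00010001u };	/* mem A, caching X */
	const uint32_t busy[] = { 0x00020001u };	/* mem B, caching X */
	const struct placement_list pl = {
		.placement = preferred, .num_placement = 1,
		.busy_placement = busy, .num_busy_placement = 1,
	};
	uint32_t new_flags = 0;

	/* Current flags share no memory bit with the preferred entry, so
	 * only the busy-placement pass matches. */
	if (pick_placement(&pl, 0x00020001u, &new_flags))
		printf("compatible, new flags 0x%08x\n", new_flags);
	return 0;
}

The out-parameter is what lets the ttm_bo_validate() hunk drop its dependence on the returned index (placement->placement[ret]) and pass new_flags to ttm_flag_masked() directly.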

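The new ttm_bo_wait_unreserved() funnels callers through bo->wu_mutex so that many threads faulting on the same object retry the reservation one at a time instead of livelocking against each other. The following is a rough userspace analogue of that pattern, with pthreads standing in for the kernel's ww_mutex and wu_mutex; all names here are hypothetical, and the real function's fast-path ww_mutex_is_locked() check and interruptible locking are omitted.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct object {
	pthread_mutex_t resv;	/* stands in for bo->resv->lock */
	pthread_mutex_t wu;	/* stands in for bo->wu_mutex */
};

static struct object obj = {
	.resv = PTHREAD_MUTEX_INITIALIZER,
	.wu = PTHREAD_MUTEX_INITIALIZER,
};

/* Wait until the object is unreserved, without holding the reservation on
 * return: acquire it once, release it immediately. Serializing callers on
 * obj->wu makes them queue up rather than all hammering resv at once. */
static int wait_unreserved(struct object *o)
{
	int ret = pthread_mutex_lock(&o->wu);

	if (ret != 0)
		return ret;
	ret = pthread_mutex_lock(&o->resv);
	if (ret == 0)
		pthread_mutex_unlock(&o->resv);
	pthread_mutex_unlock(&o->wu);
	return ret;
}

static void *holder(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&obj.resv);	/* "reserve" the object */
	usleep(100 * 1000);		/* hold the reservation briefly */
	pthread_mutex_unlock(&obj.resv);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, holder, NULL);
	usleep(10 * 1000);		/* let the holder reserve first */
	if (wait_unreserved(&obj) == 0)
		printf("object is now unreserved\n");
	pthread_join(t, NULL);
	return 0;
}

As the in-diff comment notes, the extra mutex is only needed because the locking order here is bo::reserve -> mmap_sem; with the opposite order a plain blocking acquire of the reservation would suffice.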