diff options
| author | Samuel Li <Samuel.Li@amd.com> | 2017-12-08 16:18:59 -0500 | 
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2018-02-19 14:17:41 -0500 | 
| commit | 09052fc3769c98e1ce1c4f3398da8201548fc449 (patch) | |
| tree | 3e0458c399b494996ed5268fc2969bb36419b8c7 /drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |
| parent | 6e227308a91db544c6f91edcf37c56764db2ae47 (diff) | |
drm/amdgpu: Move to gtt before cpu accesses dma buf.
To improve cpu read performance. This is implemented for APUs currently.
v2: Adapt to change https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
v3: Adapt to change "forward begin_cpu_access callback to drivers"
v4: Instead of v3, reuse drm_gem dmabuf_ops here. Also some minor fixes as suggested.
v5: only set dma_buf ops when it is valid (Samuel)
Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 70 | 
1 file changed, 69 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..8afec21dc45d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
@@ -164,6 +165,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				       enum dma_data_direction direction)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
+	u32 domain = amdgpu_display_framebuffer_domains(adev);
+	int ret;
+	bool reads = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_FROM_DEVICE);
+
+	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+		return 0;
+
+	/* move to gtt */
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	}
+
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
+	.map = drm_gem_dmabuf_kmap,
+	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	.unmap = drm_gem_dmabuf_kunmap,
+	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
@@ -176,7 +221,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(-EPERM);
 
 	buf = drm_gem_prime_export(dev, gobj, flags);
-	if (!IS_ERR(buf))
+	if (!IS_ERR(buf)) {
 		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->ops = &amdgpu_dmabuf_ops;
+	}
+
 	return buf;
 }
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj;
+
+	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing dmabuf exported from out own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_get(obj);
+			return obj;
+		}
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
