Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 52
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 2a8d980d17f6..f6d7f6404a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -113,49 +113,65 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	return &bo->gem_base;
 }
 
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+				 struct device *target_dev,
+				 struct dma_buf_attachment *attach)
 {
+	struct drm_gem_object *obj = dma_buf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	long ret = 0;
+	long r;
 
-	ret = amdgpu_bo_reserve(bo, false);
-	if (unlikely(ret != 0))
-		return ret;
+	r = drm_gem_map_attach(dma_buf, target_dev, attach);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		goto error_detach;
 
 	/*
 	 * Wait for all shared fences to complete before we switch to future
 	 * use of exclusive fence on this prime shared bo.
 	 */
-	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-						  MAX_SCHEDULE_TIMEOUT);
-	if (unlikely(ret < 0)) {
-		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
-		amdgpu_bo_unreserve(bo);
-		return ret;
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (unlikely(r < 0)) {
+		DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		goto error_unreserve;
 	}
 
 	/* pin buffer into GTT */
-	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
-	if (likely(ret == 0))
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	if (likely(r == 0))
 		bo->prime_shared_count++;
 
+error_unreserve:
 	amdgpu_bo_unreserve(bo);
-	return ret;
+
+error_detach:
+	if (r)
+		drm_gem_map_detach(dma_buf, attach);
+	return r;
 }
 
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+				  struct dma_buf_attachment *attach)
 {
+	struct drm_gem_object *obj = dma_buf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	int ret = 0;
 
 	ret = amdgpu_bo_reserve(bo, true);
 	if (unlikely(ret != 0))
-		return;
+		goto error;
 
 	amdgpu_bo_unpin(bo);
 	if (bo->prime_shared_count)
 		bo->prime_shared_count--;
 	amdgpu_bo_unreserve(bo);
+
+error:
+	drm_gem_map_detach(dma_buf, attach);
 }
 
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -194,8 +210,8 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
 }
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops = {
-	.attach = drm_gem_map_attach,
-	.detach = drm_gem_map_detach,
+	.attach = amdgpu_gem_map_attach,
+	.detach = amdgpu_gem_map_detach,
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
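For context, the two new callbacks wrap the generic drm_gem_map_attach()/drm_gem_map_detach() helpers and are invoked by the dma-buf core whenever an importer attaches to or detaches from the exported buffer, so the pin into GTT now happens at attach time instead of through the prime pin/unpin hooks. Below is a minimal importer-side sketch showing when the new callbacks fire; the function name, device pointer and dma_buf pointer are hypothetical placeholders, not part of this patch.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

/* Hypothetical importer-side helper, for illustration only. */
static int example_import(struct device *importer_dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* dma_buf_attach() ends up in amdgpu_dmabuf_ops.attach, i.e. the new
	 * amdgpu_gem_map_attach(), which now also pins the BO into GTT. */
	attach = dma_buf_attach(buf, importer_dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);	/* .detach callback -> unpin */
		return PTR_ERR(sgt);
	}

	/* ... program the importing device with the sg_table ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);		/* .detach callback -> unpin */
	return 0;
}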