author     Chris Wilson <chris@chris-wilson.co.uk>   2016-10-25 13:00:45 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2016-10-25 14:40:39 +0200
commit     f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree       026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent     0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA
operations to make room.
A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making it clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!
(v2...: rebase, rerun spatch)
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel
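For a sense of what the rename means on the provider side, here is a minimal sketch of a hypothetical driver fence in its post-rename form (the foo_* names are invented for illustration and are not part of this patch; the real amdgpu conversion is in the diff below). Only the identifiers change; the structure and argument lists stay the same:

#include <linux/dma-fence.h>	/* was <linux/fence.h> before this series */
#include <linux/spinlock.h>

/* Hypothetical driver fence embedding the renamed core object. */
struct foo_fence {
	struct dma_fence base;		/* was: struct fence base; */
	spinlock_t lock;
};

static const char *foo_get_driver_name(struct dma_fence *f)	/* was: struct fence * */
{
	return "foo";
}

static const char *foo_get_timeline_name(struct dma_fence *f)
{
	return "foo.timeline";
}

static bool foo_enable_signaling(struct dma_fence *f)
{
	return true;	/* nothing to arm in this toy example */
}

static const struct dma_fence_ops foo_fence_ops = {	/* was: struct fence_ops */
	.get_driver_name = foo_get_driver_name,
	.get_timeline_name = foo_get_timeline_name,
	.enable_signaling = foo_enable_signaling,
	.wait = dma_fence_default_wait,			/* was: fence_default_wait */
};

static void foo_fence_start(struct foo_fence *f, u64 context, unsigned int seqno)
{
	spin_lock_init(&f->lock);
	/* fence_init() -> dma_fence_init(); the argument list is unchanged */
	dma_fence_init(&f->base, &foo_fence_ops, &f->lock, context, seqno);
}

The coccinelle script below performs exactly this kind of mechanical substitution across the tree.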
coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)
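As a rough illustration of what the function-rename disjunction above does at a typical consumer call site (hypothetical code, not taken from this patch), a wait helper ends up looking like this after the conversion:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical consumer, shown post-rename; the comments note the old names. */
static int example_wait_on_fence(struct dma_fence *fence)	/* was: struct fence */
{
	long ret;

	if (dma_fence_is_signaled(fence))	/* was: fence_is_signaled() */
		return 0;

	/* was: fence_wait_timeout(); returns 0 on timeout, <0 on error */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	if (ret == 0)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;

	return 0;
}

Because every change is of this purely mechanical form, the tree-wide patch can be regenerated by rerunning spatch with the script above, as the v2 note mentions.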
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3a2e42f4b897..57552c79ec58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -48,7 +48,7 @@
  */
 
 struct amdgpu_fence {
-	struct fence base;
+	struct dma_fence base;
 
 	/* RB, DMA, etc. */
 	struct amdgpu_ring *ring;
@@ -73,8 +73,8 @@ void amdgpu_fence_slab_fini(void)
 /*
  * Cast helper
  */
-static const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+static const struct dma_fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 {
 	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 
@@ -130,11 +130,11 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence *old, **ptr;
+	struct dma_fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -143,10 +143,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 
 	seq = ++ring->fence_drv.sync_seq;
 	fence->ring = ring;
-	fence_init(&fence->base, &amdgpu_fence_ops,
-		   &ring->fence_drv.lock,
-		   adev->fence_context + ring->idx,
-		   seq);
+	dma_fence_init(&fence->base, &amdgpu_fence_ops,
+		       &ring->fence_drv.lock,
+		       adev->fence_context + ring->idx,
+		       seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       seq, AMDGPU_FENCE_FLAG_INT);
 
@@ -155,12 +155,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
 	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !fence_is_signaled(old)) {
+	if (old && !dma_fence_is_signaled(old)) {
 		DRM_INFO("rcu slot is busy\n");
-		fence_wait(old, false);
+		dma_fence_wait(old, false);
 	}
 
-	rcu_assign_pointer(*ptr, fence_get(&fence->base));
+	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
 
@@ -211,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 	seq &= drv->num_fences_mask;
 
 	do {
-		struct fence *fence, **ptr;
+		struct dma_fence *fence, **ptr;
 
 		++last_seq;
 		last_seq &= drv->num_fences_mask;
@@ -224,13 +224,13 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		if (!fence)
 			continue;
 
-		r = fence_signal(fence);
+		r = dma_fence_signal(fence);
 		if (!r)
-			FENCE_TRACE(fence, "signaled from irq context\n");
+			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
 		else
 			BUG();
 
-		fence_put(fence);
+		dma_fence_put(fence);
 	} while (last_seq != seq);
 }
 
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
 	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
-	struct fence *fence, **ptr;
+	struct dma_fence *fence, **ptr;
 	int r;
 
 	if (!seq)
@@ -269,14 +269,14 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 	rcu_read_lock();
 	fence = rcu_dereference(*ptr);
-	if (!fence || !fence_get_rcu(fence)) {
+	if (!fence || !dma_fence_get_rcu(fence)) {
 		rcu_read_unlock();
 		return 0;
 	}
 	rcu_read_unlock();
 
-	r = fence_wait(fence, false);
-	fence_put(fence);
+	r = dma_fence_wait(fence, false);
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -452,7 +452,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amd_sched_fini(&ring->sched);
 		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
-			fence_put(ring->fence_drv.fences[j]);
+			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
 		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
@@ -541,12 +541,12 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
  * Common fence implementation
  */
 
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amdgpu";
 }
 
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	return (const char *)fence->ring->name;
@@ -560,7 +560,7 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
  * to fence_queue that checks if this fence is signaled, and if so it
  * signals the fence and removes itself.
  */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
@@ -568,7 +568,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	if (!timer_pending(&ring->fence_drv.fallback_timer))
 		amdgpu_fence_schedule_fallback(ring);
 
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 
 	return true;
 }
@@ -582,7 +582,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	kmem_cache_free(amdgpu_fence_slab, fence);
 }
@@ -595,16 +595,16 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amdgpu_fence_release(struct fence *f)
+static void amdgpu_fence_release(struct dma_fence *f)
 {
 	call_rcu(&f->rcu, amdgpu_fence_free);
 }
 
-static const struct fence_ops amdgpu_fence_ops = {
+static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amdgpu_fence_release,
 };