author | Dave Airlie <airlied@redhat.com> | 2017-12-07 06:28:22 +1000
---|---|---
committer | Dave Airlie <airlied@redhat.com> | 2017-12-07 06:28:22 +1000
commit | 9c606cd4117a3c45e04a6616b1a0dbeb18eeee62 (patch) |
tree | aa6c1db29e1a3f687c81fa03aecd24992a76e993 /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |
parent | c5dd52f653fa74f8f4771425c6db33609ad21258 (diff) |
parent | 3997eea57caf542e9327df9b6bb2882a57c4c421 (diff) |
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature pull request for 4.16. Highlights:
- RV and Vega header cleanups
- TTM operation context support
- 48 bit GPUVM fixes for Vega/RV
- More smatch fixes
- ECC support for Vega10
- Resizable BAR support
- Multi-display sync support in DC
- SR-IOV fixes
- Various scheduler improvements
- GPU reset fixes and vram lost tracking
- Clean up DC/powerplay interfaces
- DCN display fixes
- Various DC fixes
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (291 commits)
drm/radeon: Use drm_fb_helper_lastclose() and _poll_changed()
drm/amdgpu: Use drm_fb_helper_lastclose() and _poll_changed()
drm/amd/display: Use drm_fb_helper_poll_changed()
drm/ttm: swap consecutive allocated pooled pages v4
drm/amdgpu: fix amdgpu_sync_resv v2
drm/ttm: swap consecutive allocated cached pages v3
drm/amd/amdgpu: set gtt size according to system memory size only
drm/amdgpu: Get rid of dep_sync as a separate object.
drm/amdgpu: allow specifying vm_block_size for multi level PDs v2
drm/amdgpu: move validation of the VM size into the VM code
drm/amdgpu: allow non pot VM size values
drm/amdgpu: choose number of VM levels based on VM size
drm/amdgpu: unify VM size handling of Vega10 with older generation
drm/amdgpu: fix amdgpu_vm_num_entries
drm/amdgpu: fix VM PD addr shift
drm/amdgpu: correct vce4.0 fw config for SRIOV (V2)
drm/amd/display: Don't call dm_log_to_buffer directly in dc_conn_log
drm/amd/display: Add dm_logger_append_va API
drm/ttm: Use a static string instead of an array of char *
drm/amd/display: remove usage of legacy_cursor_update
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 22
1 file changed, 12 insertions, 10 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a4bf21f8f1c1..ebe1ffbab0c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -35,6 +35,7 @@ struct amdgpu_sync_entry {
         struct hlist_node node;
         struct dma_fence *fence;
+        bool explicit;
 };
 
 static struct kmem_cache *amdgpu_sync_slab;
@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
  *
  */
 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-                      struct dma_fence *f)
+                      struct dma_fence *f, bool explicit)
 {
         struct amdgpu_sync_entry *e;
 
@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
         if (!e)
                 return -ENOMEM;
 
+        e->explicit = explicit;
+
         hash_add(sync->fences, &e->node, f->context);
         e->fence = dma_fence_get(f);
         return 0;
@@ -189,10 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
         /* always sync to the exclusive fence */
         f = reservation_object_get_excl(resv);
-        r = amdgpu_sync_fence(adev, sync, f);
-
-        if (explicit_sync)
-                return r;
+        r = amdgpu_sync_fence(adev, sync, f, false);
 
         flist = reservation_object_get_list(resv);
         if (!flist || r)
@@ -212,15 +212,15 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                              (fence_owner == AMDGPU_FENCE_OWNER_VM)))
                                 continue;
 
-                        /* Ignore fence from the same owner as
+                        /* Ignore fence from the same owner and explicit one as
                          * long as it isn't undefined.
                          */
                         if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-                            fence_owner == owner)
+                            (fence_owner == owner || explicit_sync))
                                 continue;
                 }
 
-                r = amdgpu_sync_fence(adev, sync, f);
+                r = amdgpu_sync_fence(adev, sync, f, false);
                 if (r)
                         break;
         }
@@ -275,19 +275,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
  * amdgpu_sync_get_fence - get the next fence from the sync object
  *
  * @sync: sync object to use
+ * @explicit: true if the next fence is explicit
  *
  * Get and removes the next fence from the sync object not signaled yet.
  */
-struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
 {
         struct amdgpu_sync_entry *e;
         struct hlist_node *tmp;
         struct dma_fence *f;
         int i;
-
         hash_for_each_safe(sync->fences, i, tmp, e, node) {
 
                 f = e->fence;
+                if (explicit)
+                        *explicit = e->explicit;
 
                 hash_del(&e->node);
                 kmem_cache_free(amdgpu_sync_slab, e);
```
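The diff above threads an explicit-sync flag through the amdgpu sync object: amdgpu_sync_fence() now records whether each stored fence was an explicit dependency, amdgpu_sync_get_fence() reports that flag back when the fence is drained, and amdgpu_sync_resv() still always syncs to the exclusive fence but skips same-owner or explicitly-synced shared fences. The sketch below is a minimal user-space model of that per-entry bookkeeping, not the kernel code: fences are plain integers, the hash table is a linked list, and the names sync_obj, sync_add_fence and sync_get_fence are invented for illustration.

```c
/*
 * Minimal user-space sketch (assumed, simplified model) of the pattern in
 * the diff above: every fence stored in the sync object remembers whether
 * it was added as an explicit dependency, and the consumer gets that flag
 * back when it pops the next fence.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sync_entry {
        struct sync_entry *next;
        int fence;        /* stand-in for struct dma_fence * */
        bool explicit;    /* mirrors the new amdgpu_sync_entry::explicit */
};

struct sync_obj {
        struct sync_entry *head;
};

/* Modeled on amdgpu_sync_fence(): remember a fence plus its explicit flag. */
static int sync_add_fence(struct sync_obj *sync, int fence, bool explicit)
{
        struct sync_entry *e = malloc(sizeof(*e));

        if (!e)
                return -1;
        e->fence = fence;
        e->explicit = explicit;
        e->next = sync->head;
        sync->head = e;
        return 0;
}

/* Modeled on amdgpu_sync_get_fence(): pop the next fence, report the flag. */
static int sync_get_fence(struct sync_obj *sync, bool *explicit)
{
        struct sync_entry *e = sync->head;
        int fence;

        if (!e)
                return -1;
        if (explicit)
                *explicit = e->explicit;
        fence = e->fence;
        sync->head = e->next;
        free(e);
        return fence;
}

int main(void)
{
        struct sync_obj sync = { .head = NULL };
        bool explicit;
        int f;

        sync_add_fence(&sync, 1, false); /* implicit fence, e.g. from a reservation object */
        sync_add_fence(&sync, 2, true);  /* explicit dependency passed in with the submission */

        while ((f = sync_get_fence(&sync, &explicit)) >= 0)
                printf("fence %d, explicit=%d\n", f, explicit);
        return 0;
}
```

Keeping the flag on each stored entry, rather than on the sync object as a whole, is what lets the consumer distinguish explicit dependencies from implicit reservation-object fences as it drains the sync object.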