author     Maxime Ripard <maxime@cerno.tech>   2021-04-26 14:03:09 +0200
committer  Maxime Ripard <maxime@cerno.tech>   2021-04-26 14:03:09 +0200
commit     355b60296143a090039211c5f0e1463f84aab65a (patch)
tree       b74d4ef2aea66252ea9cf77c847de6c6e72a02b7 /drivers/gpu/drm/i915/gem/i915_gem_userptr.c
parent     91185d55b32e7e377f15fb46a62b216f8d3038d4 (diff)
parent     a1a1ca70deb3ec600eeabb21de7f3f48aaae5695 (diff)
Merge drm/drm-next into drm-misc-next
Christian needs some patches from drm/next
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_userptr.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c  918
1 file changed, 321 insertions, 597 deletions
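
The diff below drops i915's private i915_mm_struct / i915_mmu_notifier bookkeeping (per-mm hash, interval tree, get-pages worker) in favour of the core mmu_interval_notifier API. As an illustration-only sketch of that general pattern — not the i915 code itself; the my_userptr names and the locking layout are invented for the example — registration and the invalidate callback look roughly like this:

```c
/* Illustration only: assumed names, not the i915 implementation. */
#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct my_userptr {
	struct mmu_interval_notifier notifier;
	unsigned long notifier_seq;
	spinlock_t lock;	/* serialises seq updates against the pin path */
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_userptr *p = container_of(mni, struct my_userptr, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	/* Bump the sequence so a concurrent pin sees the invalidation. */
	spin_lock(&p->lock);
	mmu_interval_set_seq(mni, cur_seq);
	spin_unlock(&p->lock);

	/* ...block here until any GPU use of the old pages has finished... */
	return true;
}

static const struct mmu_interval_notifier_ops my_notifier_ops = {
	.invalidate = my_invalidate,
};

/* Watch [addr, addr + size) of the calling process's address space. */
static int my_userptr_register(struct my_userptr *p,
			       unsigned long addr, unsigned long size)
{
	spin_lock_init(&p->lock);
	p->notifier_seq = ULONG_MAX;
	return mmu_interval_notifier_insert(&p->notifier, current->mm,
					    addr, size, &my_notifier_ops);
}
```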
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index f2eaed6aca3d..a657b99ec760 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -2,10 +2,39 @@ * SPDX-License-Identifier: MIT * * Copyright © 2012-2014 Intel Corporation + * + * Based on amdgpu_mn, which bears the following notice: + * + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: + * Christian König <christian.koenig@amd.com> */ #include <linux/mmu_context.h> -#include <linux/mmu_notifier.h> #include <linux/mempolicy.h> #include <linux/swap.h> #include <linux/sched/mm.h> @@ -15,408 +44,121 @@ #include "i915_gem_object.h" #include "i915_scatterlist.h" -struct i915_mm_struct { - struct mm_struct *mm; - struct drm_i915_private *i915; - struct i915_mmu_notifier *mn; - struct hlist_node node; - struct kref kref; - struct rcu_work work; -}; - -#if defined(CONFIG_MMU_NOTIFIER) -#include <linux/interval_tree.h> +#ifdef CONFIG_MMU_NOTIFIER -struct i915_mmu_notifier { - spinlock_t lock; - struct hlist_node node; - struct mmu_notifier mn; - struct rb_root_cached objects; - struct i915_mm_struct *mm; -}; - -struct i915_mmu_object { - struct i915_mmu_notifier *mn; - struct drm_i915_gem_object *obj; - struct interval_tree_node it; -}; - -static void add_object(struct i915_mmu_object *mo) +/** + * i915_gem_userptr_invalidate - callback to notify about mm change + * + * @mni: the range (mm) is about to update + * @range: details on the invalidation + * @cur_seq: Value to pass to mmu_interval_set_seq() + * + * Block for operations on BOs to finish and mark pages as accessed and + * potentially dirty. 
+ */ +static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) { - GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb)); - interval_tree_insert(&mo->it, &mo->mn->objects); -} + struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + long r; -static void del_object(struct i915_mmu_object *mo) -{ - if (RB_EMPTY_NODE(&mo->it.rb)) - return; + if (!mmu_notifier_range_blockable(range)) + return false; - interval_tree_remove(&mo->it, &mo->mn->objects); - RB_CLEAR_NODE(&mo->it.rb); -} + spin_lock(&i915->mm.notifier_lock); -static void -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) -{ - struct i915_mmu_object *mo = obj->userptr.mmu_object; + mmu_interval_set_seq(mni, cur_seq); + + spin_unlock(&i915->mm.notifier_lock); /* - * During mm_invalidate_range we need to cancel any userptr that - * overlaps the range being invalidated. Doing so requires the - * struct_mutex, and that risks recursion. In order to cause - * recursion, the user must alias the userptr address space with - * a GTT mmapping (possible with a MAP_FIXED) - then when we have - * to invalidate that mmaping, mm_invalidate_range is called with - * the userptr address *and* the struct_mutex held. To prevent that - * we set a flag under the i915_mmu_notifier spinlock to indicate - * whether this object is valid. + * We don't wait when the process is exiting. This is valid + * because the object will be cleaned up anyway. + * + * This is also temporarily required as a hack, because we + * cannot currently force non-consistent batch buffers to preempt + * and reschedule by waiting on it, hanging processes on exit. */ - if (!mo) - return; + if (current->flags & PF_EXITING) + return true; - spin_lock(&mo->mn->lock); - if (value) - add_object(mo); - else - del_object(mo); - spin_unlock(&mo->mn->lock); -} - -static int -userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, - const struct mmu_notifier_range *range) -{ - struct i915_mmu_notifier *mn = - container_of(_mn, struct i915_mmu_notifier, mn); - struct interval_tree_node *it; - unsigned long end; - int ret = 0; - - if (RB_EMPTY_ROOT(&mn->objects.rb_root)) - return 0; - - /* interval ranges are inclusive, but invalidate range is exclusive */ - end = range->end - 1; - - spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, range->start, end); - while (it) { - struct drm_i915_gem_object *obj; - - if (!mmu_notifier_range_blockable(range)) { - ret = -EAGAIN; - break; - } - - /* - * The mmu_object is released late when destroying the - * GEM object so it is entirely possible to gain a - * reference on an object in the process of being freed - * since our serialisation is via the spinlock and not - * the struct_mutex - and consequently use it after it - * is freed and then double free it. To prevent that - * use-after-free we only acquire a reference on the - * object if it is not in the process of being destroyed. 
- */ - obj = container_of(it, struct i915_mmu_object, it)->obj; - if (!kref_get_unless_zero(&obj->base.refcount)) { - it = interval_tree_iter_next(it, range->start, end); - continue; - } - spin_unlock(&mn->lock); - - ret = i915_gem_object_unbind(obj, - I915_GEM_OBJECT_UNBIND_ACTIVE | - I915_GEM_OBJECT_UNBIND_BARRIER); - if (ret == 0) - ret = __i915_gem_object_put_pages(obj); - i915_gem_object_put(obj); - if (ret) - return ret; - - spin_lock(&mn->lock); - - /* - * As we do not (yet) protect the mmu from concurrent insertion - * over this range, there is no guarantee that this search will - * terminate given a pathologic workload. - */ - it = interval_tree_iter_first(&mn->objects, range->start, end); - } - spin_unlock(&mn->lock); - - return ret; + /* we will unbind on next submission, still have userptr pins */ + r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (r <= 0) + drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); + return true; } -static const struct mmu_notifier_ops i915_gem_userptr_notifier = { - .invalidate_range_start = userptr_mn_invalidate_range_start, +static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = { + .invalidate = i915_gem_userptr_invalidate, }; -static struct i915_mmu_notifier * -i915_mmu_notifier_create(struct i915_mm_struct *mm) -{ - struct i915_mmu_notifier *mn; - - mn = kmalloc(sizeof(*mn), GFP_KERNEL); - if (mn == NULL) - return ERR_PTR(-ENOMEM); - - spin_lock_init(&mn->lock); - mn->mn.ops = &i915_gem_userptr_notifier; - mn->objects = RB_ROOT_CACHED; - mn->mm = mm; - - return mn; -} - -static void -i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) -{ - struct i915_mmu_object *mo; - - mo = fetch_and_zero(&obj->userptr.mmu_object); - if (!mo) - return; - - spin_lock(&mo->mn->lock); - del_object(mo); - spin_unlock(&mo->mn->lock); - kfree(mo); -} - -static struct i915_mmu_notifier * -i915_mmu_notifier_find(struct i915_mm_struct *mm) -{ - struct i915_mmu_notifier *mn, *old; - int err; - - mn = READ_ONCE(mm->mn); - if (likely(mn)) - return mn; - - mn = i915_mmu_notifier_create(mm); - if (IS_ERR(mn)) - return mn; - - err = mmu_notifier_register(&mn->mn, mm->mm); - if (err) { - kfree(mn); - return ERR_PTR(err); - } - - old = cmpxchg(&mm->mn, NULL, mn); - if (old) { - mmu_notifier_unregister(&mn->mn, mm->mm); - kfree(mn); - mn = old; - } - - return mn; -} - static int -i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, - unsigned flags) +i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj) { - struct i915_mmu_notifier *mn; - struct i915_mmu_object *mo; - - if (flags & I915_USERPTR_UNSYNCHRONIZED) - return capable(CAP_SYS_ADMIN) ? 
0 : -EPERM; - - if (GEM_WARN_ON(!obj->userptr.mm)) - return -EINVAL; - - mn = i915_mmu_notifier_find(obj->userptr.mm); - if (IS_ERR(mn)) - return PTR_ERR(mn); - - mo = kzalloc(sizeof(*mo), GFP_KERNEL); - if (!mo) - return -ENOMEM; - - mo->mn = mn; - mo->obj = obj; - mo->it.start = obj->userptr.ptr; - mo->it.last = obj->userptr.ptr + obj->base.size - 1; - RB_CLEAR_NODE(&mo->it.rb); - - obj->userptr.mmu_object = mo; - return 0; -} - -static void -i915_mmu_notifier_free(struct i915_mmu_notifier *mn, - struct mm_struct *mm) -{ - if (mn == NULL) - return; - - mmu_notifier_unregister(&mn->mn, mm); - kfree(mn); -} - -#else - -static void -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) -{ -} - -static void -i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) -{ -} - -static int -i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, - unsigned flags) -{ - if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0) - return -ENODEV; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; -} - -static void -i915_mmu_notifier_free(struct i915_mmu_notifier *mn, - struct mm_struct *mm) -{ -} - -#endif - -static struct i915_mm_struct * -__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real) -{ - struct i915_mm_struct *it, *mm = NULL; - - rcu_read_lock(); - hash_for_each_possible_rcu(i915->mm_structs, - it, node, - (unsigned long)real) - if (it->mm == real && kref_get_unless_zero(&it->kref)) { - mm = it; - break; - } - rcu_read_unlock(); - - return mm; + return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm, + obj->userptr.ptr, obj->base.size, + &i915_gem_userptr_notifier_ops); } -static int -i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) +static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_mm_struct *mm, *new; - int ret = 0; - - /* During release of the GEM object we hold the struct_mutex. This - * precludes us from calling mmput() at that time as that may be - * the last reference and so call exit_mmap(). exit_mmap() will - * attempt to reap the vma, and if we were holding a GTT mmap - * would then call drm_gem_vm_close() and attempt to reacquire - * the struct mutex. So in order to avoid that recursion, we have - * to defer releasing the mm reference until after we drop the - * struct_mutex, i.e. we need to schedule a worker to do the clean - * up. 
- */ - mm = __i915_mm_struct_find(i915, current->mm); - if (mm) - goto out; - - new = kmalloc(sizeof(*mm), GFP_KERNEL); - if (!new) - return -ENOMEM; + struct page **pvec = NULL; - kref_init(&new->kref); - new->i915 = to_i915(obj->base.dev); - new->mm = current->mm; - new->mn = NULL; - - spin_lock(&i915->mm_lock); - mm = __i915_mm_struct_find(i915, current->mm); - if (!mm) { - hash_add_rcu(i915->mm_structs, - &new->node, - (unsigned long)new->mm); - mmgrab(current->mm); - mm = new; + spin_lock(&i915->mm.notifier_lock); + if (!--obj->userptr.page_ref) { + pvec = obj->userptr.pvec; + obj->userptr.pvec = NULL; } - spin_unlock(&i915->mm_lock); - if (mm != new) - kfree(new); - -out: - obj->userptr.mm = mm; - return ret; -} - -static void -__i915_mm_struct_free__worker(struct work_struct *work) -{ - struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work); - - i915_mmu_notifier_free(mm->mn, mm->mm); - mmdrop(mm->mm); - kfree(mm); -} - -static void -__i915_mm_struct_free(struct kref *kref) -{ - struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); - - spin_lock(&mm->i915->mm_lock); - hash_del_rcu(&mm->node); - spin_unlock(&mm->i915->mm_lock); + GEM_BUG_ON(obj->userptr.page_ref < 0); + spin_unlock(&i915->mm.notifier_lock); - INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker); - queue_rcu_work(system_wq, &mm->work); -} - -static void -i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) -{ - if (obj->userptr.mm == NULL) - return; + if (pvec) { + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; - kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free); - obj->userptr.mm = NULL; + unpin_user_pages(pvec, num_pages); + kvfree(pvec); + } } -struct get_pages_work { - struct work_struct work; - struct drm_i915_gem_object *obj; - struct task_struct *task; -}; - -static struct sg_table * -__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, unsigned long num_pages) +static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; unsigned int sg_page_sizes; struct scatterlist *sg; + struct page **pvec; int ret; st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) - return ERR_PTR(-ENOMEM); + return -ENOMEM; + + spin_lock(&i915->mm.notifier_lock); + if (GEM_WARN_ON(!obj->userptr.page_ref)) { + spin_unlock(&i915->mm.notifier_lock); + ret = -EFAULT; + goto err_free; + } + + obj->userptr.page_ref++; + pvec = obj->userptr.pvec; + spin_unlock(&i915->mm.notifier_lock); alloc_table: sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, num_pages << PAGE_SHIFT, max_segment, NULL, 0, GFP_KERNEL); if (IS_ERR(sg)) { - kfree(st); - return ERR_CAST(sg); + ret = PTR_ERR(sg); + goto err; } ret = i915_gem_gtt_prepare_pages(obj, st); @@ -428,203 +170,20 @@ alloc_table: goto alloc_table; } - kfree(st); - return ERR_PTR(ret); + goto err; } sg_page_sizes = i915_sg_page_sizes(st->sgl); __i915_gem_object_set_pages(obj, st, sg_page_sizes); - return st; -} - -static void -__i915_gem_userptr_get_pages_worker(struct work_struct *_work) -{ - struct get_pages_work *work = container_of(_work, typeof(*work), work); - struct drm_i915_gem_object *obj = work->obj; - const unsigned long npages = obj->base.size >> PAGE_SHIFT; - unsigned long pinned; - struct page **pvec; - int ret; - - ret = -ENOMEM; - pinned = 0; - - pvec = kvmalloc_array(npages, 
sizeof(struct page *), GFP_KERNEL); - if (pvec != NULL) { - struct mm_struct *mm = obj->userptr.mm->mm; - unsigned int flags = 0; - int locked = 0; - - if (!i915_gem_object_is_readonly(obj)) - flags |= FOLL_WRITE; - - ret = -EFAULT; - if (mmget_not_zero(mm)) { - while (pinned < npages) { - if (!locked) { - mmap_read_lock(mm); - locked = 1; - } - ret = pin_user_pages_remote - (mm, - obj->userptr.ptr + pinned * PAGE_SIZE, - npages - pinned, - flags, - pvec + pinned, NULL, &locked); - if (ret < 0) - break; - - pinned += ret; - } - if (locked) - mmap_read_unlock(mm); - mmput(mm); - } - } - - mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); - if (obj->userptr.work == &work->work) { - struct sg_table *pages = ERR_PTR(ret); - - if (pinned == npages) { - pages = __i915_gem_userptr_alloc_pages(obj, pvec, - npages); - if (!IS_ERR(pages)) { - pinned = 0; - pages = NULL; - } - } - - obj->userptr.work = ERR_CAST(pages); - if (IS_ERR(pages)) - __i915_gem_userptr_set_active(obj, false); - } - mutex_unlock(&obj->mm.lock); - - unpin_user_pages(pvec, pinned); - kvfree(pvec); - - i915_gem_object_put(obj); - put_task_struct(work->task); - kfree(work); -} - -static struct sg_table * -__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) -{ - struct get_pages_work *work; - - /* Spawn a worker so that we can acquire the - * user pages without holding our mutex. Access - * to the user pages requires mmap_lock, and we have - * a strict lock ordering of mmap_lock, struct_mutex - - * we already hold struct_mutex here and so cannot - * call gup without encountering a lock inversion. - * - * Userspace will keep on repeating the operation - * (thanks to EAGAIN) until either we hit the fast - * path or the worker completes. If the worker is - * cancelled or superseded, the task is still run - * but the results ignored. (This leads to - * complications that we may have a stray object - * refcount that we need to be wary of when - * checking for existing objects during creation.) - * If the worker encounters an error, it reports - * that error back to this function through - * obj->userptr.work = ERR_PTR. - */ - work = kmalloc(sizeof(*work), GFP_KERNEL); - if (work == NULL) - return ERR_PTR(-ENOMEM); - - obj->userptr.work = &work->work; - - work->obj = i915_gem_object_get(obj); - - work->task = current; - get_task_struct(work->task); - - INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); - queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work); - - return ERR_PTR(-EAGAIN); -} - -static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) -{ - const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; - struct mm_struct *mm = obj->userptr.mm->mm; - struct page **pvec; - struct sg_table *pages; - bool active; - int pinned; - unsigned int gup_flags = 0; - - /* If userspace should engineer that these pages are replaced in - * the vma between us binding this page into the GTT and completion - * of rendering... Their loss. If they change the mapping of their - * pages they need to create a new bo to point to the new vma. - * - * However, that still leaves open the possibility of the vma - * being copied upon fork. Which falls under the same userspace - * synchronisation issue as a regular bo, except that this time - * the process may not be expecting that a particular piece of - * memory is tied to the GPU. 
- * - * Fortunately, we can hook into the mmu_notifier in order to - * discard the page references prior to anything nasty happening - * to the vma (discard or cloning) which should prevent the more - * egregious cases from causing harm. - */ - - if (obj->userptr.work) { - /* active flag should still be held for the pending work */ - if (IS_ERR(obj->userptr.work)) - return PTR_ERR(obj->userptr.work); - else - return -EAGAIN; - } - - pvec = NULL; - pinned = 0; - - if (mm == current->mm) { - pvec = kvmalloc_array(num_pages, sizeof(struct page *), - GFP_KERNEL | - __GFP_NORETRY | - __GFP_NOWARN); - if (pvec) { - /* defer to worker if malloc fails */ - if (!i915_gem_object_is_readonly(obj)) - gup_flags |= FOLL_WRITE; - pinned = pin_user_pages_fast_only(obj->userptr.ptr, - num_pages, gup_flags, - pvec); - } - } - - active = false; - if (pinned < 0) { - pages = ERR_PTR(pinned); - pinned = 0; - } else if (pinned < num_pages) { - pages = __i915_gem_userptr_get_pages_schedule(obj); - active = pages == ERR_PTR(-EAGAIN); - } else { - pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages); - active = !IS_ERR(pages); - } - if (active) - __i915_gem_userptr_set_active(obj, true); - - if (IS_ERR(pages)) - unpin_user_pages(pvec, pinned); - kvfree(pvec); + return 0; - return PTR_ERR_OR_ZERO(pages); +err: + i915_gem_object_userptr_drop_ref(obj); +err_free: + kfree(st); + return ret; } static void @@ -634,9 +193,6 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, struct sgt_iter sgt_iter; struct page *page; - /* Cancel any inflight work and force them to restart their gup */ - obj->userptr.work = NULL; - __i915_gem_userptr_set_active(obj, false); if (!pages) return; @@ -676,42 +232,224 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, } mark_page_accessed(page); - unpin_user_page(page); } obj->mm.dirty = false; sg_free_table(pages); kfree(pages); + + i915_gem_object_userptr_drop_ref(obj); +} + +static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages) +{ + struct sg_table *pages; + int err; + + err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (err) + return err; + + if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj))) + return -EBUSY; + + assert_object_held(obj); + + pages = __i915_gem_object_unset_pages(obj); + if (!IS_ERR_OR_NULL(pages)) + i915_gem_userptr_put_pages(obj, pages); + + if (get_pages) + err = ____i915_gem_object_get_pages(obj); + + return err; +} + +int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; + struct page **pvec; + unsigned int gup_flags = 0; + unsigned long notifier_seq; + int pinned, ret; + + if (obj->userptr.notifier.mm != current->mm) + return -EFAULT; + + ret = i915_gem_object_lock_interruptible(obj, NULL); + if (ret) + return ret; + + /* optimistically try to preserve current pages while unlocked */ + if (i915_gem_object_has_pages(obj) && + !mmu_interval_check_retry(&obj->userptr.notifier, + obj->userptr.notifier_seq)) { + spin_lock(&i915->mm.notifier_lock); + if (obj->userptr.pvec && + !mmu_interval_read_retry(&obj->userptr.notifier, + obj->userptr.notifier_seq)) { + obj->userptr.page_ref++; + + /* We can keep using the current binding, this is the fastpath */ + ret = 1; + } + spin_unlock(&i915->mm.notifier_lock); + } + + if (!ret) { + /* Make sure userptr is unbound for next attempt, so we don't use stale pages. 
*/ + ret = i915_gem_object_userptr_unbind(obj, false); + } + i915_gem_object_unlock(obj); + if (ret < 0) + return ret; + + if (ret > 0) + return 0; + + notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier); + + pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); + if (!pvec) + return -ENOMEM; + + if (!i915_gem_object_is_readonly(obj)) + gup_flags |= FOLL_WRITE; + + pinned = ret = 0; + while (pinned < num_pages) { + ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE, + num_pages - pinned, gup_flags, + &pvec[pinned]); + if (ret < 0) + goto out; + + pinned += ret; + } + ret = 0; + + spin_lock(&i915->mm.notifier_lock); + + if (mmu_interval_read_retry(&obj->userptr.notifier, + !obj->userptr.page_ref ? notifier_seq : + obj->userptr.notifier_seq)) { + ret = -EAGAIN; + goto out_unlock; + } + + if (!obj->userptr.page_ref++) { + obj->userptr.pvec = pvec; + obj->userptr.notifier_seq = notifier_seq; + + pvec = NULL; + } + +out_unlock: + spin_unlock(&i915->mm.notifier_lock); + +out: + if (pvec) { + unpin_user_pages(pvec, pinned); + kvfree(pvec); + } + + return ret; +} + +int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) +{ + if (mmu_interval_read_retry(&obj->userptr.notifier, + obj->userptr.notifier_seq)) { + /* We collided with the mmu notifier, need to retry */ + + return -EAGAIN; + } + + return 0; +} + +void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) +{ + i915_gem_object_userptr_drop_ref(obj); +} + +int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) +{ + int err; + + err = i915_gem_object_userptr_submit_init(obj); + if (err) + return err; + + err = i915_gem_object_lock_interruptible(obj, NULL); + if (!err) { + /* + * Since we only check validity, not use the pages, + * it doesn't matter if we collide with the mmu notifier, + * and -EAGAIN handling is not required. 
+ */ + err = i915_gem_object_pin_pages(obj); + if (!err) + i915_gem_object_unpin_pages(obj); + + i915_gem_object_unlock(obj); + } + + i915_gem_object_userptr_submit_fini(obj); + return err; } static void i915_gem_userptr_release(struct drm_i915_gem_object *obj) { - i915_gem_userptr_release__mmu_notifier(obj); - i915_gem_userptr_release__mm_struct(obj); + GEM_WARN_ON(obj->userptr.page_ref); + + mmu_interval_notifier_remove(&obj->userptr.notifier); + obj->userptr.notifier.mm = NULL; } static int i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) { - if (obj->userptr.mmu_object) - return 0; + drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n"); + + return -EINVAL; +} + +static int +i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *args) +{ + drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n"); + + return -EINVAL; +} - return i915_gem_userptr_init__mmu_notifier(obj, 0); +static int +i915_gem_userptr_pread(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pread *args) +{ + drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n"); + + return -EINVAL; } static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .name = "i915_gem_object_userptr", - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE | + .flags = I915_GEM_OBJECT_IS_SHRINKABLE | I915_GEM_OBJECT_NO_MMAP | - I915_GEM_OBJECT_ASYNC_CANCEL, + I915_GEM_OBJECT_IS_PROXY, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, .dmabuf_export = i915_gem_userptr_dmabuf_export, + .pwrite = i915_gem_userptr_pwrite, + .pread = i915_gem_userptr_pread, .release = i915_gem_userptr_release, }; +#endif + /* * Creates a new mm object that wraps some normal memory from the process * context - user memory. @@ -752,12 +490,12 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - static struct lock_class_key lock_class; + static struct lock_class_key __maybe_unused lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_userptr *args = data; - struct drm_i915_gem_object *obj; - int ret; - u32 handle; + struct drm_i915_gem_object __maybe_unused *obj; + int __maybe_unused ret; + u32 __maybe_unused handle; if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { /* We cannot support coherent userptr objects on hw without @@ -770,21 +508,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, I915_USERPTR_UNSYNCHRONIZED)) return -EINVAL; - /* - * XXX: There is a prevalence of the assumption that we fit the - * object's page count inside a 32bit _signed_ variable. Let's document - * this and catch if we ever need to fix it. In the meantime, if you do - * spot such a local variable, please consider fixing! 
- * - * Aside from our own locals (for which we have no excuse!): - * - sg_table embeds unsigned int for num_pages - * - get_user_pages*() mixed ints with longs - */ - - if (args->user_size >> PAGE_SHIFT > INT_MAX) - return -E2BIG; - - if (overflows_type(args->user_size, obj->base.size)) + if (i915_gem_object_size_2big(args->user_size)) return -E2BIG; if (!args->user_size) @@ -796,6 +520,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) return -EFAULT; + if (args->flags & I915_USERPTR_UNSYNCHRONIZED) + return -ENODEV; + if (args->flags & I915_USERPTR_READ_ONLY) { /* * On almost all of the older hw, we cannot tell the GPU that @@ -805,17 +532,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENODEV; } +#ifdef CONFIG_MMU_NOTIFIER obj = i915_gem_object_alloc(); if (obj == NULL) return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, + I915_BO_ALLOC_STRUCT_PAGE); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); obj->userptr.ptr = args->user_ptr; + obj->userptr.notifier_seq = ULONG_MAX; if (args->flags & I915_USERPTR_READ_ONLY) i915_gem_object_set_readonly(obj); @@ -823,9 +553,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, * at binding. This means that we need to hook into the mmu_notifier * in order to detect if the mmu is destroyed. */ - ret = i915_gem_userptr_init__mm_struct(obj); - if (ret == 0) - ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); + ret = i915_gem_userptr_init__mmu_notifier(obj); if (ret == 0) ret = drm_gem_handle_create(file, &obj->base, &handle); @@ -836,24 +564,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, args->handle = handle; return 0; +#else + return -ENODEV; +#endif } int i915_gem_init_userptr(struct drm_i915_private *dev_priv) { - spin_lock_init(&dev_priv->mm_lock); - hash_init(dev_priv->mm_structs); - - dev_priv->mm.userptr_wq = - alloc_workqueue("i915-userptr-acquire", - WQ_HIGHPRI | WQ_UNBOUND, - 0); - if (!dev_priv->mm.userptr_wq) - return -ENOMEM; +#ifdef CONFIG_MMU_NOTIFIER + spin_lock_init(&dev_priv->mm.notifier_lock); +#endif return 0; } void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) { - destroy_workqueue(dev_priv->mm.userptr_wq); } |