diff options
Diffstat (limited to 'drivers/xen/gntdev.c')
-rw-r--r--  drivers/xen/gntdev.c | 157
1 file changed, 106 insertions(+), 51 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 59ffea800079..4b56c39f766d 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -35,6 +35,7 @@  #include <linux/slab.h>  #include <linux/highmem.h>  #include <linux/refcount.h> +#include <linux/workqueue.h>  #include <xen/xen.h>  #include <xen/grant_table.h> @@ -60,10 +61,11 @@ module_param(limit, uint, 0644);  MODULE_PARM_DESC(limit,  	"Maximum number of grants that may be mapped by one mapping request"); +/* True in PV mode, false otherwise */  static int use_ptemod; -static int unmap_grant_pages(struct gntdev_grant_map *map, -			     int offset, int pages); +static void unmap_grant_pages(struct gntdev_grant_map *map, +			      int offset, int pages);  static struct miscdevice gntdev_miscdev; @@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)  	kvfree(map->unmap_ops);  	kvfree(map->kmap_ops);  	kvfree(map->kunmap_ops); +	kvfree(map->being_removed);  	kfree(map);  } @@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,  	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),  					GFP_KERNEL);  	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); +	add->being_removed = +		kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);  	if (NULL == add->grants    ||  	    NULL == add->map_ops   ||  	    NULL == add->unmap_ops || -	    NULL == add->pages) +	    NULL == add->pages     || +	    NULL == add->being_removed)  		goto err;  	if (use_ptemod) {  		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]), @@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)  	if (!refcount_dec_and_test(&map->users))  		return; -	if (map->pages && !use_ptemod) +	if (map->pages && !use_ptemod) { +		/* +		 * Increment the reference count.  This ensures that the +		 * subsequent call to unmap_grant_pages() will not wind up +		 * re-entering itself.  
It *can* wind up calling +		 * gntdev_put_map() recursively, but such calls will be with a +		 * reference count greater than 1, so they will return before +		 * this code is reached.  The recursion depth is thus limited to +		 * 1.  Do NOT use refcount_inc() here, as it will detect that +		 * the reference count is zero and WARN(). +		 */ +		refcount_set(&map->users, 1); + +		/* +		 * Unmap the grants.  This may or may not be asynchronous, so it +		 * is possible that the reference count is 1 on return, but it +		 * could also be greater than 1. +		 */  		unmap_grant_pages(map, 0, map->count); +		/* Check if the memory now needs to be freed */ +		if (!refcount_dec_and_test(&map->users)) +			return; + +		/* +		 * All pages have been returned to the hypervisor, so free the +		 * map. +		 */ +	} +  	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {  		notify_remote_via_evtchn(map->notify.event);  		evtchn_put(map->notify.event); @@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)  int gntdev_map_grant_pages(struct gntdev_grant_map *map)  { +	size_t alloced = 0;  	int i, err = 0;  	if (!use_ptemod) { @@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)  			map->count);  	for (i = 0; i < map->count; i++) { -		if (map->map_ops[i].status == GNTST_okay) +		if (map->map_ops[i].status == GNTST_okay) {  			map->unmap_ops[i].handle = map->map_ops[i].handle; -		else if (!err) +			if (!use_ptemod) +				alloced++; +		} else if (!err)  			err = -EINVAL;  		if (map->flags & GNTMAP_device_map)  			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;  		if (use_ptemod) { -			if (map->kmap_ops[i].status == GNTST_okay) +			if (map->kmap_ops[i].status == GNTST_okay) { +				if (map->map_ops[i].status == GNTST_okay) +					alloced++;  				map->kunmap_ops[i].handle = map->kmap_ops[i].handle; -			else if (!err) +			} else if (!err)  				err = -EINVAL;  		}  	} +	atomic_add(alloced, &map->live_grants);  	return err; 
 } -static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, -			       int pages) +static void __unmap_grant_pages_done(int result, +		struct gntab_unmap_queue_data *data)  { -	int i, err = 0; -	struct gntab_unmap_queue_data unmap_data; - -	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { -		int pgno = (map->notify.addr >> PAGE_SHIFT); -		if (pgno >= offset && pgno < offset + pages) { -			/* No need for kmap, pages are in lowmem */ -			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); -			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; -			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; -		} -	} - -	unmap_data.unmap_ops = map->unmap_ops + offset; -	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; -	unmap_data.pages = map->pages + offset; -	unmap_data.count = pages; - -	err = gnttab_unmap_refs_sync(&unmap_data); -	if (err) -		return err; +	unsigned int i; +	struct gntdev_grant_map *map = data->data; +	unsigned int offset = data->unmap_ops - map->unmap_ops; -	for (i = 0; i < pages; i++) { -		if (map->unmap_ops[offset+i].status) -			err = -EINVAL; +	for (i = 0; i < data->count; i++) { +		WARN_ON(map->unmap_ops[offset+i].status);  		pr_debug("unmap handle=%d st=%d\n",  			map->unmap_ops[offset+i].handle,  			map->unmap_ops[offset+i].status);  		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;  		if (use_ptemod) { -			if (map->kunmap_ops[offset+i].status) -				err = -EINVAL; +			WARN_ON(map->kunmap_ops[offset+i].status);  			pr_debug("kunmap handle=%u st=%d\n",  				 map->kunmap_ops[offset+i].handle,  				 map->kunmap_ops[offset+i].status);  			map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;  		}  	} -	return err; +	/* +	 * Decrease the live-grant counter.  This must happen after the loop to +	 * prevent premature reuse of the grants by gnttab_mmap(). 
+	 */ +	atomic_sub(data->count, &map->live_grants); + +	/* Release reference taken by __unmap_grant_pages */ +	gntdev_put_map(NULL, map); +} + +static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, +			       int pages) +{ +	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { +		int pgno = (map->notify.addr >> PAGE_SHIFT); + +		if (pgno >= offset && pgno < offset + pages) { +			/* No need for kmap, pages are in lowmem */ +			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); + +			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; +			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; +		} +	} + +	map->unmap_data.unmap_ops = map->unmap_ops + offset; +	map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; +	map->unmap_data.pages = map->pages + offset; +	map->unmap_data.count = pages; +	map->unmap_data.done = __unmap_grant_pages_done; +	map->unmap_data.data = map; +	refcount_inc(&map->users); /* to keep map alive during async call below */ + +	gnttab_unmap_refs_async(&map->unmap_data);  } -static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, -			     int pages) +static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, +			      int pages)  { -	int range, err = 0; +	int range; + +	if (atomic_read(&map->live_grants) == 0) +		return; /* Nothing to do */  	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);  	/* It is possible the requested range will have a "hole" where we  	 * already unmapped some of the grants. Only unmap valid ranges.  	 
*/ -	while (pages && !err) { -		while (pages && -		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) { +	while (pages) { +		while (pages && map->being_removed[offset]) {  			offset++;  			pages--;  		}  		range = 0;  		while (range < pages) { -			if (map->unmap_ops[offset + range].handle == -			    INVALID_GRANT_HANDLE) +			if (map->being_removed[offset + range])  				break; +			map->being_removed[offset + range] = true;  			range++;  		} -		err = __unmap_grant_pages(map, offset, range); +		if (range) +			__unmap_grant_pages(map, offset, range);  		offset += range;  		pages -= range;  	} - -	return err;  }  /* ------------------------------------------------------------------ */ @@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,  	struct gntdev_grant_map *map =  		container_of(mn, struct gntdev_grant_map, notifier);  	unsigned long mstart, mend; -	int err;  	if (!mmu_notifier_range_blockable(range))  		return false; @@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,  			map->index, map->count,  			map->vma->vm_start, map->vma->vm_end,  			range->start, range->end, mstart, mend); -	err = unmap_grant_pages(map, +	unmap_grant_pages(map,  				(mstart - map->vma->vm_start) >> PAGE_SHIFT,  				(mend - mstart) >> PAGE_SHIFT); -	WARN_ON(err);  	return true;  } @@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)  		goto unlock_out;  	if (use_ptemod && map->vma)  		goto unlock_out; +	if (atomic_read(&map->live_grants)) { +		err = -EAGAIN; +		goto unlock_out; +	}  	refcount_inc(&map->users);  	vma->vm_ops = &gntdev_vmops;  | 
