 drivers/vdpa/Kconfig             |   1
 drivers/vdpa/vdpa_sim/vdpa_sim.c | 108
 drivers/vdpa/vdpa_sim/vdpa_sim.h |   2
 3 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index a245809c99d0..6e82a0e228c2 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -14,6 +14,7 @@ config VDPA_SIM
 	depends on RUNTIME_TESTING_MENU && HAS_DMA
 	select DMA_OPS
 	select VHOST_RING
+	select IOMMU_IOVA
 	help
 	  Enable this module to support vDPA device simulators. These devices
 	  are used for testing, prototyping and development of vDPA.
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 5b6b2f87d40c..fc2ec9599753 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -17,6 +17,7 @@
 #include <linux/vringh.h>
 #include <linux/vdpa.h>
 #include <linux/vhost_iotlb.h>
+#include <linux/iova.h>
 
 #include "vdpa_sim.h"
 
@@ -128,30 +129,57 @@ static int dir_to_perm(enum dma_data_direction dir)
 	return perm;
 }
 
+static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
+				    size_t size, unsigned int perm)
+{
+	struct iova *iova;
+	dma_addr_t dma_addr;
+	int ret;
+
+	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
+	iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+	if (!iova)
+		return DMA_MAPPING_ERROR;
+
+	dma_addr = iova_dma_addr(&vdpasim->iova, iova);
+
+	spin_lock(&vdpasim->iommu_lock);
+	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+				    (u64)dma_addr + size - 1, (u64)paddr, perm);
+	spin_unlock(&vdpasim->iommu_lock);
+
+	if (ret) {
+		__free_iova(&vdpasim->iova, iova);
+		return DMA_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
+static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
+				size_t size)
+{
+	spin_lock(&vdpasim->iommu_lock);
+	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+			      (u64)dma_addr + size - 1);
+	spin_unlock(&vdpasim->iommu_lock);
+
+	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
+}
+
 static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
 				   unsigned long offset, size_t size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
-	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
-	int ret, perm = dir_to_perm(dir);
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	int perm = dir_to_perm(dir);
 
 	if (perm < 0)
 		return DMA_MAPPING_ERROR;
 
-	/* For simplicity, use identical mapping to avoid e.g iova
-	 * allocator.
-	 */
-	spin_lock(&vdpasim->iommu_lock);
-	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
-				    pa, dir_to_perm(dir));
-	spin_unlock(&vdpasim->iommu_lock);
-	if (ret)
-		return DMA_MAPPING_ERROR;
-
-	return (dma_addr_t)(pa);
+	return vdpasim_map_range(vdpasim, paddr, size, perm);
 }
 
 static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
@@ -159,12 +187,8 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
 			       unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
 
-	spin_lock(&vdpasim->iommu_lock);
-	vhost_iotlb_del_range(iommu, (u64)dma_addr,
-			      (u64)dma_addr + size - 1);
-	spin_unlock(&vdpasim->iommu_lock);
+	vdpasim_unmap_range(vdpasim, dma_addr, size);
 }
 
 static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -172,27 +196,22 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
 				    unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
-	void *addr = kmalloc(size, flag);
-	int ret;
+	phys_addr_t paddr;
+	void *addr;
 
-	spin_lock(&vdpasim->iommu_lock);
+	addr = kmalloc(size, flag);
 	if (!addr) {
 		*dma_addr = DMA_MAPPING_ERROR;
-	} else {
-		u64 pa = virt_to_phys(addr);
-
-		ret = vhost_iotlb_add_range(iommu, (u64)pa,
-					    (u64)pa + size - 1,
-					    pa, VHOST_MAP_RW);
-		if (ret) {
-			*dma_addr = DMA_MAPPING_ERROR;
-			kfree(addr);
-			addr = NULL;
-		} else
-			*dma_addr = (dma_addr_t)pa;
+		return NULL;
+	}
+
+	paddr = virt_to_phys(addr);
+
+	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
+	if (*dma_addr == DMA_MAPPING_ERROR) {
+		kfree(addr);
+		return NULL;
 	}
-	spin_unlock(&vdpasim->iommu_lock);
 
 	return addr;
 }
@@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
 				  unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
 
-	spin_lock(&vdpasim->iommu_lock);
-	vhost_iotlb_del_range(iommu, (u64)dma_addr,
-			      (u64)dma_addr + size - 1);
-	spin_unlock(&vdpasim->iommu_lock);
+	vdpasim_unmap_range(vdpasim, dma_addr, size);
 
-	kfree(phys_to_virt((uintptr_t)dma_addr));
+	kfree(vaddr);
 }
 
 static const struct dma_map_ops vdpasim_dma_ops = {
@@ -271,6 +286,13 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 	for (i = 0; i < dev_attr->nvqs; i++)
 		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
 
+	ret = iova_cache_get();
+	if (ret)
+		goto err_iommu;
+
+	/* For simplicity we use an IOVA allocator with byte granularity */
+	init_iova_domain(&vdpasim->iova, 1, 0);
+
 	vdpasim->vdpa.dma_dev = dev;
 
 	return vdpasim;
@@ -541,6 +563,8 @@ static void vdpasim_free(struct vdpa_device *vdpa)
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
 	cancel_work_sync(&vdpasim->work);
+	put_iova_domain(&vdpasim->iova);
+	iova_cache_put();
 	kvfree(vdpasim->buffer);
 	if (vdpasim->iommu)
 		vhost_iotlb_free(vdpasim->iommu);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 6d75444f9948..cd58e888bcf3 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -6,6 +6,7 @@
 #ifndef _VDPA_SIM_H
 #define _VDPA_SIM_H
 
+#include <linux/iova.h>
 #include <linux/vringh.h>
 #include <linux/vdpa.h>
 #include <linux/virtio_byteorder.h>
@@ -57,6 +58,7 @@ struct vdpasim {
 	/* virtio config according to device type */
 	void *config;
 	struct vhost_iotlb *iommu;
+	struct iova_domain iova;
 	void *buffer;
 	u32 status;
 	u32 generation;
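
Taken together, the patch swaps the simulator's identity DMA mapping (dma_addr == paddr) for addresses allocated from a per-device iova_domain, so host physical addresses no longer appear on the virtio datapath. Because the domain is created with a granule of 1 (init_iova_domain(&vdpasim->iova, 1, 0)), the allocator's "pfn" values are plain byte addresses and alloc_iova() takes byte sizes directly. The following sketch condenses that allocator lifecycle into one place; it is an illustration of the pattern, not code from the patch: demo_dev, demo_init(), demo_map(), demo_unmap() and demo_exit() are hypothetical names, and the vhost_iotlb bookkeeping and locking done by vdpasim_map_range()/vdpasim_unmap_range() above are deliberately omitted.

#include <linux/iova.h>
#include <linux/dma-mapping.h>

/* Hypothetical container standing in for struct vdpasim */
struct demo_dev {
	struct iova_domain iova;
};

static int demo_init(struct demo_dev *d)
{
	int ret;

	/* Take a reference on the shared iova kmem caches */
	ret = iova_cache_get();
	if (ret)
		return ret;

	/* Granule of 1 byte starting at 0: iova "pfns" are byte addresses */
	init_iova_domain(&d->iova, 1, 0);
	return 0;
}

static dma_addr_t demo_map(struct demo_dev *d, size_t size)
{
	struct iova *iova;

	/* limit_pfn = ULONG_MAX - 1 opens the whole IOVA space;
	 * size is a byte count thanks to the 1-byte granule */
	iova = alloc_iova(&d->iova, size, ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	/* With a 1-byte granule this is simply pfn_lo */
	return iova_dma_addr(&d->iova, iova);
}

static void demo_unmap(struct demo_dev *d, dma_addr_t dma_addr)
{
	/* iova_pfn() converts the DMA address back to the node's key */
	free_iova(&d->iova, iova_pfn(&d->iova, dma_addr));
}

static void demo_exit(struct demo_dev *d)
{
	put_iova_domain(&d->iova);
	iova_cache_put();
}

On the real datapath the dma_addr returned by the allocator is additionally inserted into the device's vhost_iotlb (see vdpasim_map_range() above) so that vringh can translate guest-visible DMA addresses back to physical memory; a failed insertion is rolled back with __free_iova().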