Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig           |  20
-rw-r--r--  drivers/base/Makefile          |   2
-rw-r--r--  drivers/base/dma-buf.c         |  34
-rw-r--r--  drivers/base/dma-contiguous.c  | 119
-rw-r--r--  drivers/base/node.c            |   6
5 files changed, 82 insertions, 99 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 5daa2599ed48..e373671652b0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -200,11 +200,9 @@ config DMA_SHARED_BUFFER
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
-config CMA
-	bool "Contiguous Memory Allocator"
-	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
-	select MIGRATION
-	select MEMORY_ISOLATION
+config DMA_CMA
+	bool "DMA Contiguous Memory Allocator"
+	depends on HAVE_DMA_CONTIGUOUS && CMA
 	help
 	  This enables the Contiguous Memory Allocator which allows drivers
 	  to allocate big physically-contiguous blocks of memory for use with
@@ -213,17 +211,7 @@ config CMA
 	  For more information see <include/linux/dma-contiguous.h>.
 	  If unsure, say "n".
 
-if CMA
-
-config CMA_DEBUG
-	bool "CMA debug messages (DEVELOPMENT)"
-	depends on DEBUG_KERNEL
-	help
-	  Turns on debug messages in CMA.  This produces KERN_DEBUG
-	  messages for every CMA call as well as various messages while
-	  processing calls such as dma_alloc_from_contiguous().
-	  This option does not affect warning and error messages.
-
+if DMA_CMA
 comment "Default contiguous memory area size:"
 
 config CMA_SIZE_MBYTES
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 48029aa477d9..94e8a80e87f8 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y			:= core.o bus.o dd.o syscore.o \
 			   attribute_container.o transport_class.o \
 			   topology.o
 obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o
-obj-$(CONFIG_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 6687ba741879..1e16cbd61da2 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -77,9 +77,36 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 	return dmabuf->ops->mmap(dmabuf, vma);
 }
 
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+	struct dma_buf *dmabuf;
+	loff_t base;
+
+	if (!is_dma_buf_file(file))
+		return -EBADF;
+
+	dmabuf = file->private_data;
+
+	/* only support discovering the end of the buffer,
+	   but also allow SEEK_SET to maintain the idiomatic
+	   SEEK_END(0), SEEK_CUR(0) pattern */
+	if (whence == SEEK_END)
+		base = dmabuf->size;
+	else if (whence == SEEK_SET)
+		base = 0;
+	else
+		return -EINVAL;
+
+	if (offset != 0)
+		return -EINVAL;
+
+	return base + offset;
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
+	.llseek		= dma_buf_llseek,
 };
 
 /*
@@ -133,7 +160,12 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 	dmabuf->exp_name = exp_name;
 
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+	if (IS_ERR(file)) {
+		kfree(dmabuf);
+		return ERR_CAST(file);
+	}
 
+	file->f_mode |= FMODE_LSEEK;
 	dmabuf->file = file;
 
 	mutex_init(&dmabuf->lock);
@@ -680,7 +712,7 @@ int dma_buf_debugfs_create_file(const char *name,
 	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
 			write, &dma_buf_debug_fops);
 
-	return PTR_RET(d);
+	return PTR_ERR_OR_ZERO(d);
 }
 #else
 static inline int dma_buf_init_debugfs(void)
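The llseek support added above lets user space discover a buffer's size
without a dedicated ioctl; note that the export path must set FMODE_LSEEK
by hand, since anonymous inode files do not get it by default. A minimal
user-space sketch of the resulting idiom, assuming "fd" is a dma-buf file
descriptor already obtained from some exporter (how it was exported is
outside this patch):

/* Hedged sketch: query a dma-buf's size via the new llseek support.
 * Only SEEK_END(0) and SEEK_SET(0) are accepted; anything else is
 * rejected with -EINVAL by the handler above. */
#include <sys/types.h>
#include <unistd.h>

static off_t dma_buf_query_size(int fd)
{
	off_t size = lseek(fd, 0, SEEK_END);	/* reports dmabuf->size */

	if (size < 0)
		return -1;	/* e.g. a kernel without this patch */

	if (lseek(fd, 0, SEEK_SET) < 0)		/* rewind to offset 0 */
		return -1;

	return size;
}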
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6c9cdaa9200d..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
 	}
 
-	if (selected_size) {
+	if (selected_size && !dma_contiguous_default_area) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_declare_contiguous(NULL, selected_size, 0, limit);
+		dma_contiguous_reserve_area(selected_size, 0, limit,
+					    &dma_contiguous_default_area);
 	}
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-	unsigned long pfn = base_pfn;
-	unsigned i = count >> pageblock_order;
+	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
 
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+	if (!cma->bitmap)
+		return -ENOMEM;
+
 	WARN_ON_ONCE(!pfn_valid(pfn));
 	zone = page_zone(pfn_to_page(pfn));
 
@@ -153,92 +160,53 @@ static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
 		}
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
-	return 0;
-}
-
-static struct cma * __init cma_create_area(unsigned long base_pfn,
-				     unsigned long count)
-{
-	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-	struct cma *cma;
-	int ret = -ENOMEM;
-
-	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-	cma = kmalloc(sizeof *cma, GFP_KERNEL);
-	if (!cma)
-		return ERR_PTR(-ENOMEM);
-
-	cma->base_pfn = base_pfn;
-	cma->count = count;
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!cma->bitmap)
-		goto no_mem;
-
-	ret = cma_activate_area(base_pfn, count);
-	if (ret)
-		goto error;
-
-	pr_debug("%s: returned %p\n", __func__, (void *)cma);
-	return cma;
-
-error:
-	kfree(cma->bitmap);
-no_mem:
-	kfree(cma);
-	return ERR_PTR(ret);
+	return 0;
 }
 
-static struct cma_reserved {
-	phys_addr_t start;
-	unsigned long size;
-	struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-	struct cma_reserved *r = cma_reserved;
-	unsigned i = cma_reserved_count;
-
-	pr_debug("%s()\n", __func__);
+	int i;
 
-	for (; i; --i, ++r) {
-		struct cma *cma;
-		cma = cma_create_area(PFN_DOWN(r->start),
-				      r->size >> PAGE_SHIFT);
-		if (!IS_ERR(cma))
-			dev_set_cma_area(r->dev, cma);
+	for (i = 0; i < cma_area_count; i++) {
+		int ret = cma_activate_area(&cma_areas[i]);
+		if (ret)
+			return ret;
 	}
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *			      for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-				  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma)
 {
-	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
+	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
 		 (unsigned long)limit);
 
 	/* Sanity checks */
-	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	if (base) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
-			base = -EBUSY;
+			ret = -EBUSY;
 			goto err;
 		}
 	} else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 		 */
 		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
 		if (!addr) {
-			base = -ENOMEM;
+			ret = -ENOMEM;
 			goto err;
 		} else {
 			base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
 	 */
-	r->start = base;
-	r->size = size;
-	r->dev = dev;
-	cma_reserved_count++;
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	*res_cma = cma;
+	cma_area_count++;
+
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 	return 0;
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return base;
+	return ret;
 }
 
 /**
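The reworked API splits reservation in two: carving the area out of
memblock at early boot (dma_contiguous_reserve_area()), and attaching it
to a device later, once struct device exists (dev_set_cma_area(), from
<linux/dma-contiguous.h>). A hedged sketch of an arch/board-level caller
follows; the function names, the camera device and the 16 MiB size are
invented placeholders, not part of this patch:

/* Hedged sketch of a caller of the new reservation API. */
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *camera_cma;	/* hypothetical per-device area */

void __init board_reserve(void)
{
	/* early boot: memblock is live, slab is not yet */
	int ret = dma_contiguous_reserve_area(SZ_16M, 0, 0, &camera_cma);

	if (ret)
		pr_err("camera CMA: reservation failed (%d)\n", ret);

	/* default area, sized from CONFIG_CMA_SIZE_MBYTES or cma= */
	dma_contiguous_reserve(0);
}

/* later, once the device has been created */
void board_camera_bind(struct device *dev)
{
	if (camera_cma)
		dev_set_cma_area(dev, camera_cma);
}

The areas themselves are activated at core_initcall() time by
cma_init_reserved_areas(), which is why the bitmap allocation could move
from reservation time into cma_activate_area().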
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7616a77ca322..bc9f43bf7e29 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -125,13 +125,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       nid, K(node_page_state(nid, NR_WRITEBACK)),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		       nid, K(node_page_state(nid, NR_ANON_PAGES)
-			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-			HPAGE_PMD_NR),
-#else
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
-#endif
 		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
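The consumer side is unchanged by this series: a driver whose device has
a CMA area attached (its own, or the default one) still allocates through
dma_alloc_from_contiguous(), usually via the arch dma-mapping ops. A
hedged sketch, with the helper names and sizes as placeholders:

/* Hedged sketch of the allocation side; "dev" is assumed to have a CMA
 * area attached. 256 pages = 1 MiB with 4 KiB pages; the third argument
 * is an alignment order, capped by CONFIG_CMA_ALIGNMENT. */
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/mm.h>
#include <linux/printk.h>

static struct page *grab_frame_buffer(struct device *dev)
{
	return dma_alloc_from_contiguous(dev, 256, 8);	/* NULL on failure */
}

static void drop_frame_buffer(struct device *dev, struct page *pages)
{
	if (!dma_release_from_contiguous(dev, pages, 256))
		pr_warn("frame buffer was not a CMA allocation?\n");
}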
