author		Christoph Hellwig <hch@lst.de>	2021-10-18 13:18:34 +0200
committer	Christoph Hellwig <hch@lst.de>	2021-12-07 12:47:05 +0100
commit		4d0564785bb03841e4b5c5b31aa4ecd1eb0d01bb (patch)
tree		fe13ba7259e2e93432cd32a1417e858b3ef90a7c /kernel/dma
parent		0fcfb00b28c0b7884635dacf38e46d60bf3d4eb1 (diff)
dma-direct: factor out dma_set_{de,en}crypted helpers
Factor out helpers that make dealing with memory encryption a little less
cumbersome.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
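
For readers skimming the diff below: the new helpers simply wrap the recurring "check force_dma_unencrypted(), then flip the page protections" pattern. The following userspace sketch shows the shape of the refactoring; the struct device field, PAGE_SIZE constant, simplified get_order(), printf-based set_memory_decrypted() and the main() driver are illustrative assumptions for this sketch, not kernel code. Only the body of dma_set_decrypted() is taken verbatim from the patch.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stub stand-ins for the kernel APIs used by the patch (simplified
 * signatures, for illustration only). */
struct device {
	int force_unencrypted;	/* hypothetical field for this sketch */
};

static int force_dma_unencrypted(struct device *dev)
{
	return dev->force_unencrypted;
}

static int set_memory_decrypted(unsigned long addr, int numpages)
{
	printf("decrypt %d page(s) at %#lx\n", numpages, addr);
	return 0;	/* 0 on success, as in the kernel */
}

/* Simplified get_order(): smallest order such that (1 << order) pages
 * cover size bytes. */
static int get_order(size_t size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* The helper the patch introduces: the force_dma_unencrypted() check and
 * the size-to-page-count conversion live in one place instead of being
 * open-coded at every call site. */
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

int main(void)
{
	struct device dev = { .force_unencrypted = 1 };
	static char buf[2 * PAGE_SIZE];

	/* Caller side: one test instead of a multi-line conditional. */
	if (dma_set_decrypted(&dev, buf, sizeof(buf)))
		return 1;
	return 0;
}

Because the helper returns 0 when force_dma_unencrypted() is false, every caller in the diff collapses its multi-line conditional into a single "if (dma_set_decrypted(...)) goto out_free_pages;" test.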
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/direct.c	56
1 file changed, 25 insertions(+), 31 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e3..d4d54af31a34 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ done:
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-					 1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }