-rw-r--r--   arch/arm/mm/dma-mapping.c | 2 ++
-rw-r--r--   arch/arm/mm/mm.h          | 4 ++++
-rw-r--r--   include/linux/cma.h       | 4 ----
-rw-r--r--   security/Kconfig          | 2 +-
4 files changed, 7 insertions, 5 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4b61541853ea..82ffac621854 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -381,6 +381,7 @@ out:
  */
 postcore_initcall(atomic_pool_init);
 
+#ifdef CONFIG_CMA_AREAS
 struct dma_contig_early_reserve {
 	phys_addr_t base;
 	unsigned long size;
@@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void)
 		iotable_init(&map, 1);
 	}
 }
+#endif
 
 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 9ff683612f2a..d7ffccb7fea7 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+#ifdef CONFIG_CMA_AREAS
 void dma_contiguous_remap(void);
+#else
+static inline void dma_contiguous_remap(void) { }
+#endif
 
 unsigned long __clear_cr(unsigned long mask);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 90fd742fd1ef..a6f637342740 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -12,10 +12,6 @@
  */
 #ifdef CONFIG_CMA_AREAS
 #define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS (0)
-
 #endif
 
 #define CMA_MAX_NAME 64
diff --git a/security/Kconfig b/security/Kconfig
index 1d2d71cc1f36..9b2c4925585a 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -166,7 +166,7 @@ config HARDENED_USERCOPY
 config HARDENED_USERCOPY_PAGESPAN
 	bool "Refuse to copy allocations that span multiple pages"
 	depends on HARDENED_USERCOPY
-	depends on EXPERT
+	depends on BROKEN
 	help
 	  When a multi-page allocation is done without __GFP_COMP,
 	  hardened usercopy will reject attempts to copy it.  There are,
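Note on the mm.h hunk above: pairing the real declaration of dma_contiguous_remap() with an empty static inline stub when CONFIG_CMA_AREAS is unset lets callers invoke the function unconditionally, with the call compiling away in non-CMA builds. The following is a minimal standalone sketch of that idiom only; CONFIG_FOO and foo_init() are hypothetical names for illustration and are not taken from this patch.

#include <stdio.h>

/*
 * Hypothetical config symbol for the demo; enable it with
 * e.g.  cc -DCONFIG_FOO demo.c  (and omit the flag for the "disabled" build).
 */
#ifdef CONFIG_FOO
void foo_init(void)
{
	/* Real implementation, built only when the option is enabled. */
	printf("foo_init: CONFIG_FOO enabled\n");
}
#else
/* Empty stub: the call below compiles away when CONFIG_FOO is not set. */
static inline void foo_init(void) { }
#endif

int main(void)
{
	foo_init();	/* no #ifdef needed at the call site */
	return 0;
}

Both variants build and link cleanly, which is the property the patch relies on for code that calls dma_contiguous_remap() unconditionally.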