author	Olof Johansson <olof@lixom.net>	2014-11-02 13:36:05 -0800
committer	Olof Johansson <olof@lixom.net>	2014-11-02 13:37:07 -0800
commit	4257412db57900e43716d0b7ddd4f4a51e6ed2f4 (patch)
tree	759963245a484422e9ad2639cb223b53f844ff15 /arch/arm64/mm
parent	cc040ba269ae6972face1dc7376ab3eaab9f64c8 (diff)
parent	4b91f7f3c8b20e073b7bfc098625b37f99789508 (diff)
Merge tag 'fixes-against-v3.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
Merge "omap fixes against v3.18-rc2" from Tony Lindgren: Few fixes for omaps to enable NAND BCH so devices won't produce errors when booted with omap2plus_defconfig, and reduce bloat by making IPV6 a loadable module. Also let's add a warning about legacy boot being deprecated for omap3. We now have things working with device tree, and only omap3 is still booting in legacy mode. So hopefully this warning will help move the remaining legacy mode users to boot with device tree. As the total reduction of code and static data is somewhere around 20000 lines of code once we remove omap3 legacy mode booting, we really do want to make omap3 to boot also in device tree mode only over the next few merge cycles. * tag 'fixes-against-v3.18-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (407 commits) ARM: OMAP2+: Warn about deprecated legacy booting mode ARM: omap2plus_defconfig: Fix errors with NAND BCH ARM: omap2plus_defconfig: Fix bloat caused by having ipv6 built-in + Linux 3.18-rc2 Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/ioremap.c	4
-rw-r--r--	arch/arm64/mm/mmu.c	12
-rw-r--r--	arch/arm64/mm/pgd.c	18
3 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c4..4a07630a6616 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache);
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
static inline pud_t * __init early_ioremap_pud(unsigned long addr)
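The ioremap.c change above is purely a type correction: the static
backing stores for the early fixmap's pmd and pud levels were declared
as pte_t arrays. All three descriptors are 64-bit on arm64, so the
arrays had the right size and the code behaved, but each array should
carry the type of the level it backs. Roughly, the strict-typecheck
definitions look like this (a simplified sketch of arm64's page table
type wrappers, not a verbatim copy of the headers):

	typedef u64 pteval_t;
	typedef u64 pmdval_t;
	typedef u64 pudval_t;

	/* Wrapping each level's value in its own struct makes the levels
	 * distinct types, so the compiler rejects e.g. storing a pmd
	 * descriptor into a pte_t array. */
	typedef struct { pteval_t pte; } pte_t;
	typedef struct { pmdval_t pmd; } pmd_t;
	typedef struct { pudval_t pud; } pud_t;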
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6894ef3e6234..0bf90d26e745 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -297,11 +297,15 @@ static void __init map_mem(void)
* create_mapping requires puds, pmds and ptes to be allocated from
* memory addressable from the initial direct kernel mapping.
*
- * The initial direct kernel mapping, located at swapper_pg_dir,
- * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
- * aligned to 2MB as per Documentation/arm64/booting.txt).
+ * The initial direct kernel mapping, located at swapper_pg_dir, gives
+ * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
+ * PHYS_OFFSET (which must be aligned to 2MB as per
+ * Documentation/arm64/booting.txt).
*/
- limit = PHYS_OFFSET + PUD_SIZE;
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ limit = PHYS_OFFSET + PMD_SIZE;
+ else
+ limit = PHYS_OFFSET + PUD_SIZE;
memblock_set_current_limit(limit);
/* map all the memory banks */
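The arithmetic behind the mmu.c change: with a 4K granule, PUD_SIZE is
1GB, which matches what swapper_pg_dir initially maps above
PHYS_OFFSET. With a 64K granule the pud level is folded away, so
PUD_SIZE degenerates to the full pgd range (terabytes) and would let
early memblock allocations land outside the mapped window; PMD_SIZE,
at 512MB, is the real limit there. A standalone sketch of the shift
arithmetic (plain userspace C, not kernel source):

	#include <stdio.h>

	int main(void)
	{
		/* 4K granule: 12-bit page offset, 9 bits per table level */
		unsigned long pud_size_4k  = 1UL << (12 + 9 + 9);
		/* 64K granule: 16-bit page offset, 13 bits per table level */
		unsigned long pmd_size_64k = 1UL << (16 + 13);

		printf("4K  PUD_SIZE = %lu MB\n", pud_size_4k  >> 20); /* 1024 */
		printf("64K PMD_SIZE = %lu MB\n", pmd_size_64k >> 20); /* 512  */
		return 0;
	}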
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260..6682b361d3ac 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+static struct kmem_cache *pgd_cache;
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (PGD_SIZE == PAGE_SIZE)
return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- return kzalloc(PGD_SIZE, GFP_KERNEL);
+ return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
if (PGD_SIZE == PAGE_SIZE)
free_page((unsigned long)pgd);
else
- kfree(pgd);
+ kmem_cache_free(pgd_cache, pgd);
+}
+
+static int __init pgd_cache_init(void)
+{
+ /*
+ * Naturally aligned pgds required by the architecture.
+ */
+ if (PGD_SIZE != PAGE_SIZE)
+ pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+ SLAB_PANIC, NULL);
+ return 0;
}
+core_initcall(pgd_cache_init);
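The pgd.c change is about alignment rather than allocation speed: as
the comment in pgd_cache_init() says, the architecture requires a
naturally aligned pgd (the table base programmed into TTBR must be
aligned to the table's size), and kzalloc() makes no such guarantee
for sub-page sizes, whereas kmem_cache_create() with align == size
hands back naturally aligned objects. A userspace analogy of that
guarantee (a hedged sketch; pgd_alloc_aligned and the 512-byte size
are hypothetical, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	/* posix_memalign() with alignment == size mirrors what align == size
	 * buys in the kmem_cache_create() call above: every object starts on
	 * a multiple of its own size ("natural alignment"). */
	static void *pgd_alloc_aligned(size_t size)
	{
		void *p = NULL;

		if (posix_memalign(&p, size, size))
			return NULL;
		return p;
	}

	int main(void)
	{
		size_t pgd_size = 512;	/* hypothetical sub-page PGD_SIZE */
		void *pgd = pgd_alloc_aligned(pgd_size);

		printf("pgd at %p, naturally aligned: %s\n", pgd,
		       pgd && ((unsigned long)pgd % pgd_size) == 0 ? "yes" : "no");
		free(pgd);
		return 0;
	}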