| field | value | date |
|---|---|---|
| author | Christophe Leroy <christophe.leroy@c-s.fr> | 2018-11-29 14:06:51 +0000 |
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2018-12-04 19:45:01 +1100 |
| commit | a95d133c8643cae40a35f82184640d312ba9f11f (patch) | |
| tree | 309c980a1ab8a30278676edce2cd01ab64bb5634 /arch/powerpc/mm/pgtable-frag.c | |
| parent | ddfc20a3b9ae23b09498a0a7c06a7049b251c158 (diff) | |
powerpc/mm: Move pte_fragment_alloc() to a common location
In preparation for the next patch, which generalises the use of
pte_fragment_alloc() to all subarches, this patch moves the related
functions to a place common to all of them.

The 8xx will need this to support 16k pages, as in that mode page
tables still have a size of 4k.

Since pte_fragment with only one fragment is no different from what
is done in the general case, all subarches can easily be migrated to
pte fragments.
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
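To make the 8xx case above concrete, the sketch below spells out the fragment arithmetic implied by the commit message: pages of 16k carved into 4k page tables. The 16k/4k values follow from the message, but the exact per-subarch constants and names here are illustrative assumptions, not the kernel definitions.

```c
#include <stdio.h>

/* Assumed values for the 8xx 16k-page mode described above:
 * pages are 16k while a page table remains 4k in size. */
#define PAGE_SIZE	(16UL * 1024)
#define PTE_FRAG_SIZE	(4UL * 1024)
#define PTE_FRAG_NR	(PAGE_SIZE / PTE_FRAG_SIZE)

int main(void)
{
	/* One 16k page can back four 4k page tables, so allocating a
	 * full page per page table would waste three quarters of it. */
	printf("%lu fragments of %lu bytes per %lu-byte page\n",
	       PTE_FRAG_NR, PTE_FRAG_SIZE, PAGE_SIZE);
	return 0;
}
```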
Diffstat (limited to 'arch/powerpc/mm/pgtable-frag.c')
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | arch/powerpc/mm/pgtable-frag.c | 116 |

1 file changed, 116 insertions, 0 deletions
```diff
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
new file mode 100644
index 000000000000..d61e7c2a9a79
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ *  Handling Page Tables through page fragments
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+void pte_frag_destroy(void *pte_frag)
+{
+	int count;
+	struct page *page;
+
+	page = virt_to_page(pte_frag);
+	/* drop all the pending references */
+	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+	/* We allow PTE_FRAG_NR fragments from a PTE page */
+	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+		pgtable_page_dtor(page);
+		__free_page(page);
+	}
+}
+
+static pte_t *get_pte_from_cache(struct mm_struct *mm)
+{
+	void *pte_frag, *ret;
+
+	spin_lock(&mm->page_table_lock);
+	ret = mm->context.pte_frag;
+	if (ret) {
+		pte_frag = ret + PTE_FRAG_SIZE;
+		/*
+		 * If we have taken up all the fragments mark PTE page NULL
+		 */
+		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+			pte_frag = NULL;
+		mm->context.pte_frag = pte_frag;
+	}
+	spin_unlock(&mm->page_table_lock);
+	return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+{
+	void *ret = NULL;
+	struct page *page;
+
+	if (!kernel) {
+		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+		if (!page)
+			return NULL;
+		if (!pgtable_page_ctor(page)) {
+			__free_page(page);
+			return NULL;
+		}
+	} else {
+		page = alloc_page(PGALLOC_GFP);
+		if (!page)
+			return NULL;
+	}
+
+	atomic_set(&page->pt_frag_refcount, 1);
+
+	ret = page_address(page);
+	/*
+	 * if we support only one fragment just return the
+	 * allocated page.
+	 */
+	if (PTE_FRAG_NR == 1)
+		return ret;
+	spin_lock(&mm->page_table_lock);
+	/*
+	 * If we find pgtable_page set, we return
+	 * the allocated page with single fragement
+	 * count.
+	 */
+	if (likely(!mm->context.pte_frag)) {
+		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return (pte_t *)ret;
+}
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+	pte_t *pte;
+
+	pte = get_pte_from_cache(mm);
+	if (pte)
+		return pte;
+
+	return __alloc_for_ptecache(mm, kernel);
+}
+
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+	struct page *page = virt_to_page(table);
+
+	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+		if (!kernel)
+			pgtable_page_dtor(page);
+		__free_page(page);
+	}
+}
```
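For readers who want to trace the pointer arithmetic above outside the kernel, here is a minimal userspace model of the carve-and-cache flow implemented by get_pte_from_cache() and __alloc_for_ptecache(). The mock_mm struct, the 16k/4k constants, and the omission of locking and pt_frag_refcount accounting are all simplifying assumptions for illustration, not kernel API.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SIZE	(16UL * 1024)			/* assumed 16k pages */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PTE_FRAG_SIZE	(4UL * 1024)			/* assumed 4k page tables */
#define PTE_FRAG_NR	(PAGE_SIZE / PTE_FRAG_SIZE)

/* Hypothetical stand-in for mm->context.pte_frag. */
struct mock_mm {
	void *pte_frag;
};

/* Mirrors get_pte_from_cache(): hand out the cached fragment, advance the
 * cursor by PTE_FRAG_SIZE, and drop the cache once the page is used up. */
static void *frag_from_cache(struct mock_mm *mm)
{
	void *ret = mm->pte_frag;

	if (ret) {
		void *next = (char *)ret + PTE_FRAG_SIZE;

		if (((uintptr_t)next & ~PAGE_MASK) == 0)	/* wrapped: page exhausted */
			next = NULL;
		mm->pte_frag = next;
	}
	return ret;
}

/* Mirrors __alloc_for_ptecache(): take a fresh page, return its first
 * fragment and cache the second one for the next caller.  Locking and
 * refcounting are elided, and the page is never freed in this sketch. */
static void *frag_alloc(struct mock_mm *mm)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		return NULL;
	if (PTE_FRAG_NR > 1 && !mm->pte_frag)
		mm->pte_frag = (char *)page + PTE_FRAG_SIZE;
	return page;
}

int main(void)
{
	struct mock_mm mm = { .pte_frag = NULL };

	/* Six allocations: the first carves a new page, the next three come
	 * from the cache, then the cycle starts over with a second page. */
	for (int i = 0; i < 6; i++) {
		void *pte = frag_from_cache(&mm);

		if (!pte)
			pte = frag_alloc(&mm);
		printf("pte %d: %p (fragment %lu of its page)\n", i, pte,
		       (unsigned long)(((uintptr_t)pte & ~PAGE_MASK) / PTE_FRAG_SIZE));
	}
	return 0;
}
```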
