Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	51
1 file changed, 50 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac65bb5e38ac..dd8737a94bec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,7 @@
 #include <linux/swapops.h>
 #include <linux/jhash.h>
 #include <linux/numa.h>
+#include <linux/llist.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -1136,7 +1137,7 @@ static inline void ClearPageHugeTemporary(struct page *page)
 	page[2].mapping = NULL;
 }
 
-void free_huge_page(struct page *page)
+static void __free_huge_page(struct page *page)
 {
 	/*
 	 * Can't pass hstate in here because it is called from the
@@ -1199,6 +1200,54 @@ void free_huge_page(struct page *page)
 	spin_unlock(&hugetlb_lock);
 }
 
+/*
+ * As free_huge_page() can be called from a non-task context, we have
+ * to defer the actual freeing in a workqueue to prevent potential
+ * hugetlb_lock deadlock.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to
+ * be freed and frees them one-by-one. As the page->mapping pointer is
+ * going to be cleared in __free_huge_page() anyway, it is reused as the
+ * llist_node structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+	struct llist_node *node;
+	struct page *page;
+
+	node = llist_del_all(&hpage_freelist);
+
+	while (node) {
+		page = container_of((struct address_space **)node,
+				     struct page, mapping);
+		node = node->next;
+		__free_huge_page(page);
+	}
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+void free_huge_page(struct page *page)
+{
+	/*
+	 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
+	 */
+	if (!in_task()) {
+		/*
+		 * Only call schedule_work() if hpage_freelist is previously
+		 * empty. Otherwise, schedule_work() had been called but the
+		 * workfn hasn't retrieved the list yet.
+		 */
+		if (llist_add((struct llist_node *)&page->mapping,
+			      &hpage_freelist))
+			schedule_work(&free_hpage_work);
+		return;
+	}
+
+	__free_huge_page(page);
+}
+
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
 	INIT_LIST_HEAD(&page->lru);
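
The patch combines two tricks worth spelling out: an llist-style lockless list (producers push with an atomic compare-and-swap; the consumer takes the whole list with one atomic exchange in llist_del_all()) and the reuse of the soon-to-be-cleared page->mapping field as the list node, recovered with container_of(). Below is a minimal standalone C sketch of that pattern, not kernel code: fake_page, push(), and drain() are illustrative stand-ins for struct page, llist_add(), and free_hpage_workfn().

/*
 * Userspace sketch (illustrative names only) of:
 * (1) a lockless push/del-all list built on C11 atomics, and
 * (2) reusing a dead pointer field as the list node, recovering the
 *     enclosing structure with container_of().
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lnode { struct lnode *next; };

struct fake_page {
	int id;
	void *mapping;	/* dead at free time; reused as a struct lnode */
};

static _Atomic(struct lnode *) freelist;

/* Push an entry; returns nonzero if the list was previously empty. */
static int push(struct lnode *n)
{
	struct lnode *first = atomic_load(&freelist);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&freelist, &first, n));
	return first == NULL;
}

/* Take the whole list at once and free the entries one by one. */
static void drain(void)
{
	struct lnode *node = atomic_exchange(&freelist, NULL);

	while (node) {
		struct fake_page *p = container_of((void **)node,
						   struct fake_page, mapping);
		node = node->next;
		printf("freeing page %d\n", p->id);
	}
}

int main(void)
{
	struct fake_page a = { .id = 1 }, b = { .id = 2 };

	if (push((struct lnode *)&a.mapping))
		puts("list was empty: this is where schedule_work() would run");
	push((struct lnode *)&b.mapping);
	drain();	/* LIFO order: frees page 2, then page 1 */
	return 0;
}

The return value of push() mirrors llist_add(), which reports whether the list was previously empty. That is why free_huge_page() only calls schedule_work() for the first entry: one pending work item is enough to drain every page added before the workfn runs.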
