Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 649e7d440bd7..c6565f00fb38 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2137,17 +2137,24 @@ reuse:
 		if (!dirty_page)
 			return ret;
 
-		/*
-		 * Yes, Virginia, this is actually required to prevent a race
-		 * with clear_page_dirty_for_io() from clearing the page dirty
-		 * bit after it clear all dirty ptes, but before a racing
-		 * do_wp_page installs a dirty pte.
-		 *
-		 * do_shared_fault is protected similarly.
-		 */
 		if (!page_mkwrite) {
-			wait_on_page_locked(dirty_page);
-			set_page_dirty_balance(dirty_page);
+			struct address_space *mapping;
+			int dirtied;
+
+			lock_page(dirty_page);
+			dirtied = set_page_dirty(dirty_page);
+			VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+			mapping = dirty_page->mapping;
+			unlock_page(dirty_page);
+
+			if (dirtied && mapping) {
+				/*
+				 * Some device drivers do not set page.mapping
+				 * but still dirty their pages
+				 */
+				balance_dirty_pages_ratelimited(mapping);
+			}
+
 			/* file_update_time outside page_lock */
 			if (vma->vm_file)
 				file_update_time(vma->vm_file);
@@ -2378,12 +2385,12 @@ void unmap_mapping_range(struct address_space *mapping,
 		details.last_index = ULONG_MAX;
 
-	i_mmap_lock_read(mapping);
+	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-	i_mmap_unlock_read(mapping);
+	i_mmap_unlock_write(mapping);
 }
 
 EXPORT_SYMBOL(unmap_mapping_range);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 		if (prev && prev->vm_end == address)
 			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-		expand_downwards(vma, address - PAGE_SIZE);
+		return expand_downwards(vma, address - PAGE_SIZE);
 	}
 	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
 		struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 		if (next && next->vm_start == address + PAGE_SIZE)
 			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-		expand_upwards(vma, address + PAGE_SIZE);
+		return expand_upwards(vma, address + PAGE_SIZE);
 	}
 	return 0;
 }
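
Note (not part of the commit): the first hunk replaces wait_on_page_locked() + set_page_dirty_balance() with an explicit lock/dirty/balance sequence. A minimal sketch of the ordering it establishes, assuming the 3.18-era kernel APIs used in the hunk; the comments are explanatory and mine, not the patch author's:

	struct address_space *mapping;
	int dirtied;

	lock_page(dirty_page);			/* serializes against clear_page_dirty_for_io(),
						 * which clears the dirty bit under the page lock */
	dirtied = set_page_dirty(dirty_page);	/* nonzero only if the page was newly dirtied */
	mapping = dirty_page->mapping;		/* snapshot while still locked: truncation can
						 * clear page->mapping once the lock is dropped */
	unlock_page(dirty_page);

	if (dirtied && mapping)			/* some device drivers dirty pages without
						 * ever setting page->mapping */
		balance_dirty_pages_ratelimited(mapping);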

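Note (not part of the commit): the last two hunks make check_stack_guard_page() return the result of expand_downwards()/expand_upwards() instead of discarding it, so a failed stack expansion now reaches the caller. A hedged caller-side sketch, modeled on the 3.18-era do_anonymous_page(); the exact fault code returned depends on the tree:

	/* Before the patch this test could pass even when guard-page
	 * expansion failed; now -ENOMEM from expand_downwards() or
	 * expand_upwards() propagates here and the fault fails cleanly. */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;	/* assumption: tree without VM_FAULT_SIGSEGV support */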