| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-05-01 15:20:08 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2023-05-01 15:20:08 -0700 | 
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /include/linux/rmap.h | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'include/linux/rmap.h')
| -rw-r--r-- | include/linux/rmap.h | 15 | 
1 files changed, 12 insertions, 3 deletions
```diff
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b15..b87d01660412 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -94,7 +94,7 @@ enum ttu_flags {
 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
-	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
+	TTU_HWPOISON		= 0x20,	/* do convert pte to hwpoison entry */
 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
 					 * and caller guarantees they will
 					 * do a final flush if necessary */
@@ -194,6 +194,8 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+		unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
@@ -201,12 +203,19 @@ void page_remove_rmap(struct page *, struct vm_area_struct *,
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
 
 static inline void __page_dup_rmap(struct page *page, bool compound)
 {
-	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+	if (compound) {
+		struct folio *folio = (struct folio *)page;
+
+		VM_BUG_ON_PAGE(compound && !PageHead(page), page);
+		atomic_inc(&folio->_entire_mapcount);
+	} else {
+		atomic_inc(&page->_mapcount);
+	}
 }
 
 static inline void page_dup_file_rmap(struct page *page, bool compound)
 {
```
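The notable hunk is the rework of __page_dup_rmap(): duplicating a compound mapping no longer goes through compound_mapcount_ptr() on the page, it bumps the folio-wide _entire_mapcount, while a base-page mapping still bumps that page's own _mapcount. The user-space sketch below models only that branch structure; struct toy_page, struct toy_folio and dup_rmap() are illustrative stand-ins, not kernel types or APIs.

```c
/* Illustrative user-space model of the __page_dup_rmap() change above.
 * toy_page/toy_folio and dup_rmap() are hypothetical stand-ins; only the
 * compound-vs-base branch mirrors the diff. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	atomic_int _mapcount;        /* per-page mapping count (base pages) */
};

struct toy_folio {
	struct toy_page first;       /* head page sits at the start of the folio */
	atomic_int _entire_mapcount; /* one count for the whole compound mapping */
};

static void dup_rmap(struct toy_page *page, bool compound)
{
	if (compound) {
		/* Like the kernel: treat the head page as its folio and bump
		 * the folio-wide counter instead of a per-page one. */
		struct toy_folio *folio = (struct toy_folio *)page;
		atomic_fetch_add(&folio->_entire_mapcount, 1);
	} else {
		/* A base page (or individual subpage) keeps its own count. */
		atomic_fetch_add(&page->_mapcount, 1);
	}
}

int main(void)
{
	struct toy_folio folio = {
		.first = { ._mapcount = 0 },
		._entire_mapcount = 0,
	};

	dup_rmap(&folio.first, true);   /* duplicate a compound (PMD-style) mapping */
	dup_rmap(&folio.first, false);  /* duplicate a single-page mapping of the head */

	printf("entire_mapcount=%d head _mapcount=%d\n",
	       atomic_load(&folio._entire_mapcount),
	       atomic_load(&folio.first._mapcount));
	return 0;
}
```

Built with any C11 compiler, the sketch prints `entire_mapcount=1 head _mapcount=1`, showing how one compound-level duplication and one per-page duplication of the head are tracked in separate counters rather than one shared compound mapcount.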
