author     Linus Torvalds <torvalds@linux-foundation.org>  2016-04-04 10:50:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-04-04 10:50:24 -0700
commit     4a2d057e4fc4f9ebd32351837c14c10a0773b956 (patch)
tree       b0ed0187a6839ebed4982fe7589b410222fd4b9a /include
parent     9735a22799b9214d17d3c231fe377fc852f042e9 (diff)
parent     1fa64f198b9f8d6ec0f7aec7c18dc94684391140 (diff)
Merge branch 'PAGE_CACHE_SIZE-removal'
Merge PAGE_CACHE_SIZE removal patches from Kirill Shutemov:
"PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
Let's stop pretending that pages in page cache are special. They are
not.
The first patch with most changes has been done with coccinelle. The
second is manual fixups on top.
The third patch removes macros definition"
[ I was planning to apply this just before rc2, but then I spaced out,
so here it is right _after_ rc2 instead.
As Kirill suggested as a possibility, I could have decided to only
merge the first two patches, and leave the old interfaces for
compatibility, but I'd rather get it all done and any out-of-tree
modules and patches can trivially do the conversion while still also
working with older kernels, so there is little reason to try to
maintain the redundant legacy model. - Linus ]
* PAGE_CACHE_SIZE-removal:
mm: drop PAGE_CACHE_* and page_cache_{get,release} definition
mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
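As a concrete illustration of the one-to-one conversion Linus mentions above
for out-of-tree code, here is a minimal sketch; the helper name
example_pages_for_range is made up for illustration and is not part of this
series, and the mapping itself is taken from the pagemap.h hunk below:

    #include <linux/mm.h>	/* PAGE_SIZE, PAGE_SHIFT, get_page(), put_page() */

    /*
     * Old spelling                New spelling
     * --------------------------  --------------------------
     * PAGE_CACHE_SIZE             PAGE_SIZE
     * PAGE_CACHE_SHIFT            PAGE_SHIFT
     * PAGE_CACHE_MASK             PAGE_MASK
     * PAGE_CACHE_ALIGN(addr)      PAGE_ALIGN(addr)
     * page_cache_get(page)        get_page(page)
     * page_cache_release(page)    put_page(page)
     *
     * The replacement names exist in older kernels too (the old macros were
     * plain aliases for them), so converted code keeps building there
     * without any compatibility shim.
     */

    /* Hypothetical helper: number of pages spanned by [off, off + len). */
    static inline unsigned long example_pages_for_range(loff_t off, size_t len)
    {
    	return (unsigned long)(((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
    			       (off >> PAGE_SHIFT));
    }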
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/backing-dev-defs.h |  2
-rw-r--r-- | include/linux/bio.h              |  2
-rw-r--r-- | include/linux/blkdev.h           |  2
-rw-r--r-- | include/linux/buffer_head.h      |  4
-rw-r--r-- | include/linux/ceph/libceph.h     |  4
-rw-r--r-- | include/linux/f2fs_fs.h          |  4
-rw-r--r-- | include/linux/fs.h               |  4
-rw-r--r-- | include/linux/mm.h               |  2
-rw-r--r-- | include/linux/mm_types.h         |  2
-rw-r--r-- | include/linux/nfs_page.h         |  6
-rw-r--r-- | include/linux/nilfs2_fs.h        |  4
-rw-r--r-- | include/linux/pagemap.h          | 32
-rw-r--r-- | include/linux/sunrpc/svc.h       |  2
-rw-r--r-- | include/linux/swap.h             |  4
14 files changed, 29 insertions, 45 deletions
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
 	void *congested_data;	/* Pointer to aux data for congested func */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..6b7481f62218 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
 #endif
 
 #define BIO_MAX_PAGES		256
-#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_SHIFT)
 #define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..669e419d6234 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
 
 static inline void put_dev_sector(Sector p)
 {
-	page_cache_release(p.v);
+	put_page(p.v);
 }
 
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
 	 */
 };
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
 
 struct page;
 struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
 static inline void attach_page_buffers(struct page *page,
 		struct buffer_head *head)
 {
-	page_cache_get(page);
+	get_page(page);
 	SetPagePrivate(page);
 	set_page_private(page, (unsigned long)head);
 }
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
  */
 static inline int calc_pages_for(u64 off, u64 len)
 {
-	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-		(off >> PAGE_CACHE_SHIFT);
+	return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+		(off >> PAGE_SHIFT);
 }
 
 extern struct kmem_cache *ceph_inode_cachep;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
  * Not allow to change this.
  */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..304991a80e23 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f)
 /* Page cache limit. The filesystems should put that into their s_maxbytes
    limits, otherwise bad things can happen in VM. */
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE	(((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE 	((loff_t)0x7fffffffffffffffLL)
 #endif
@@ -2067,7 +2067,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
 extern int locks_mandatory_locked(struct file *);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..ffcff53e3b2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
  *
  * A page may belong to an inode's memory mapping. In this case, page->mapping
  * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
 
 	/* Information about our backing store: */
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
+					   units */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
 	struct page		*wb_page;	/* page to read in/write out */
 	struct nfs_open_context	*wb_context;	/* File state context info */
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
-	pgoff_t			wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
-	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
+	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
+	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
 				wb_pgbase,	/* Start of page data */
 				wb_bytes;	/* Length of request */
 	struct kref		wb_kref;	/* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
 static inline
 loff_t req_offset(struct nfs_page *req)
 {
-	return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+	return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
 }
 
 #endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == NILFS_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 nilfs_rec_len_to_disk(unsigned len)
 {
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(NILFS_MAX_REC_LEN);
 	else if (len > (1 << 16))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 				(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
 		return page->index << compound_order(page);
 
 	if (likely(!PageTransTail(page)))
-		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		return page->index;
 
 	/*
 	 *  We don't initialize ->index for tail pages: calculate based on
 	 *  head page
 	 */
-	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = compound_head(page)->index;
 	pgoff += page - compound_head(page);
 	return pgoff;
 }
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
  */
 static inline loff_t page_offset(struct page *page)
 {
-	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 		return linear_hugepage_index(vma, address);
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
-	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient.  That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-	       PAGE_CACHE_SHIFT;
+	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+	       PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
  *
  * These happen to all be powers of 2, which is not strictly
  * necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
  *
 * For UDP transports, a block plus NFS,RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,9 +433,9 @@ struct backing_dev_info;
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
 /* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
 #define free_page_and_swap_cache(page) \
-	page_cache_release(page)
+	put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
 	release_pages((pages), (nr), false);