From 2ad654bc5e2b211e92f66da1d819e47d79a866f0 Mon Sep 17 00:00:00 2001
From: Zefan Li
Date: Thu, 25 Sep 2014 09:41:02 +0800
Subject: cpuset: PF_SPREAD_PAGE and PF_SPREAD_SLAB should be atomic flags

When we change cpuset.memory_spread_{page,slab}, cpuset will flip
PF_SPREAD_{PAGE,SLAB} bit of tsk->flags for each task in that cpuset.
This should be done using atomic bitops, but currently we don't, which
is broken.

Tetsuo reported a hard-to-reproduce kernel crash on RHEL6, which
happened when one thread tried to clear PF_USED_MATH while at the same
time another thread tried to flip PF_SPREAD_PAGE/PF_SPREAD_SLAB. They
both operate on the same task.

Here's the full report:
https://lkml.org/lkml/2014/9/19/230

To fix this, we make PF_SPREAD_PAGE and PF_SPREAD_SLAB atomic flags.

v4:
- updated mm/slab.c. (Fengguang Wu)
- updated Documentation.

Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Miao Xie
Cc: Kees Cook
Fixes: 950592f7b991 ("cpusets: update tasks' page/slab spread flags in time")
Cc: stable@vger.kernel.org # 2.6.31+
Reported-by: Tetsuo Handa
Signed-off-by: Zefan Li
Signed-off-by: Tejun Heo
---
 mm/slab.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index a467b308c682..881951e67f12 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2994,7 +2994,7 @@ out:
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3226,7 +3226,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	void *objp;
 
-	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
 		objp = alternate_node_alloc(cache, flags);
 		if (objp)
 			goto out;
--
cgit v1.2.3-70-g09d2
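The race described above is easy to reproduce in userspace. The
following standalone demo is illustrative only, not kernel code:
pthreads and C11 atomics stand in for kernel tasks and for
set_bit()/clear_bit(), and the names FLAG_A/FLAG_B/flip_b are ours.
Plain |= / &= read-modify-writes on a shared word let one thread wipe
out the other thread's bit; the atomic variants never do. Build with
`cc -O2 -pthread race-demo.c`.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_A (1UL << 0)	/* stands in for PF_USED_MATH */
#define FLAG_B (1UL << 1)	/* stands in for PF_SPREAD_PAGE */
#define ITERS  (1UL << 24)

static unsigned long flags;	/* analogue of tsk->flags */
static atomic_ulong aflags;	/* same word, atomic version */
static unsigned long lost;

static void *flip_b(void *unused)
{
	for (unsigned long n = 0; n < ITERS; n++) {
		flags |= FLAG_B;	/* non-atomic: load, modify, store */
		flags &= ~FLAG_B;	/* a stale store here can wipe FLAG_A */
	}
	return NULL;
}

static void *flip_b_atomic(void *unused)
{
	for (unsigned long n = 0; n < ITERS; n++) {
		atomic_fetch_or(&aflags, FLAG_B);	/* like set_bit()   */
		atomic_fetch_and(&aflags, ~FLAG_B);	/* like clear_bit() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, flip_b, NULL);
	for (unsigned long n = 0; n < ITERS; n++) {
		flags |= FLAG_A;
		if (!(flags & FLAG_A))	/* FLAG_A just vanished */
			lost++;
		flags &= ~FLAG_A;
	}
	pthread_join(t, NULL);
	printf("plain bitops:  lost FLAG_A %lu times\n", lost);

	lost = 0;
	pthread_create(&t, NULL, flip_b_atomic, NULL);
	for (unsigned long n = 0; n < ITERS; n++) {
		atomic_fetch_or(&aflags, FLAG_A);
		if (!(atomic_load(&aflags) & FLAG_A))
			lost++;
		atomic_fetch_and(&aflags, ~FLAG_A);
	}
	pthread_join(t, NULL);
	printf("atomic bitops: lost FLAG_A %lu times\n", lost);
	return 0;
}

On a multicore machine the plain-bitops pass typically reports a
nonzero loss count, while the atomic pass is always zero -- the
fetch_and on FLAG_B can never disturb FLAG_A.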
From d4a5fca592b9ab52b90bb261a90af3c8f53be011 Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Thu, 25 Sep 2014 16:05:20 -0700
Subject: mm, slab: initialize object alignment on cache creation

Since commit 4590685546a3 ("mm/sl[aou]b: Common alignment code"), the
"ralign" automatic variable in __kmem_cache_create() may be used
uninitialized.

The proper alignment defaults to BYTES_PER_WORD and can be overridden
by SLAB_RED_ZONE or the alignment specified by the caller.

This fixes https://bugzilla.kernel.org/show_bug.cgi?id=85031

Signed-off-by: David Rientjes
Reported-by: Andrei Elovikov
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: Joonsoo Kim
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index a467b308c682..b8b619bc84ad 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-	size_t left_over, freelist_size, ralign;
+	size_t left_over, freelist_size;
+	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
 	size_t size = cachep->size;
@@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/*
-	 * Redzoning and user store require word alignment or possibly larger.
-	 * Note this will be overridden by architecture or caller mandated
-	 * alignment if either is greater than BYTES_PER_WORD.
-	 */
-	if (flags & SLAB_STORE_USER)
-		ralign = BYTES_PER_WORD;
-
 	if (flags & SLAB_RED_ZONE) {
 		ralign = REDZONE_ALIGN;
 		/* If redzoning, ensure that the second redzone is suitably
--
cgit v1.2.3-70-g09d2

From dbab31aa2ceec2d201966fa0b552f151310ba5f4 Mon Sep 17 00:00:00 2001
From: Peter Feiner
Date: Thu, 25 Sep 2014 16:05:29 -0700
Subject: mm: softdirty: keep bit when zapping file pte

This fixes the same bug as b43790eedd31 ("mm: softdirty: don't forget
to save file map softdiry bit on unmap") and 9aed8614af5a ("mm/memory.c:
don't forget to set softdirty on file mapped fault") where the return
value of pte_*mksoft_dirty was being ignored.

To be sure that no other pte/pmd "mk" function return values were being
ignored, I annotated the functions in arch/x86/include/asm/pgtable.h
with __must_check and rebuilt.

The userspace effect of this bug is that the softdirty mark might be
lost if a file mapped pte gets zapped.

Signed-off-by: Peter Feiner
Acked-by: Cyrill Gorcunov
Cc: Pavel Emelyanov
Cc: Jamie Liu
Cc: Hugh Dickins
Cc: stable@vger.kernel.org [3.12+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/memory.c b/mm/memory.c
index d17f1bcd2a91..e229970e4223 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1127,7 +1127,7 @@ again:
 						addr) != page->index) {
 				pte_t ptfile = pgoff_to_pte(page->index);
 				if (pte_soft_dirty(ptent))
-					pte_file_mksoft_dirty(ptfile);
+					ptfile = pte_file_mksoft_dirty(ptfile);
 				set_pte_at(mm, addr, pte, ptfile);
 			}
 			if (PageAnon(page))
--
cgit v1.2.3-70-g09d2
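The bug pattern here -- calling a function that returns a new value and
discarding the result -- is exactly what __must_check catches at build
time. A hypothetical userspace sketch (our own toy pte type and helper,
not the kernel API; __must_check in the kernel expands to the same GCC
attribute used below):

#include <stdio.h>

/* Toy value-type "pte", mirroring the kernel pattern in which
 * pte_*mksoft_dirty() returns a modified copy instead of mutating
 * its argument in place. */
typedef struct { unsigned long val; } pte_t;

#define SOFT_DIRTY (1UL << 1)

/* warn_unused_result is what the kernel's __must_check expands to */
__attribute__((warn_unused_result))
static pte_t pte_mksoft_dirty(pte_t pte)
{
	pte.val |= SOFT_DIRTY;
	return pte;
}

int main(void)
{
	pte_t pte = { 0 };

	/* Buggy pattern from the commit: return value dropped, pte
	 * unchanged.  With warn_unused_result the compiler warns here:
	 *
	 *	pte_mksoft_dirty(pte);
	 */

	/* Fixed pattern: assign the returned value back. */
	pte = pte_mksoft_dirty(pte);

	printf("soft-dirty set: %s\n",
	       (pte.val & SOFT_DIRTY) ? "yes" : "no");
	return 0;
}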
From b928095b0a7cff7fb9fcf4c706348ceb8ab2c295 Mon Sep 17 00:00:00 2001
From: Miklos Szeredi
Date: Wed, 24 Sep 2014 17:56:17 +0200
Subject: shmem: fix nlink for rename overwrite directory

When rename overwrites an empty directory, we also need to drop the
victim directory's extra nlink.

Test prog:

#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <err.h>

int main(void)
{
	const char *test_dir1 = "test-dir1";
	const char *test_dir2 = "test-dir2";
	int res;
	int fd;
	struct stat statbuf;

	res = mkdir(test_dir1, 0777);
	if (res == -1)
		err(1, "mkdir(\"%s\")", test_dir1);

	res = mkdir(test_dir2, 0777);
	if (res == -1)
		err(1, "mkdir(\"%s\")", test_dir2);

	fd = open(test_dir2, O_RDONLY);
	if (fd == -1)
		err(1, "open(\"%s\")", test_dir2);

	res = rename(test_dir1, test_dir2);
	if (res == -1)
		err(1, "rename(\"%s\", \"%s\")", test_dir1, test_dir2);

	res = fstat(fd, &statbuf);
	if (res == -1)
		err(1, "fstat(%i)", fd);

	if (statbuf.st_nlink != 0) {
		fprintf(stderr, "nlink is %lu, should be 0\n",
			statbuf.st_nlink);
		return 1;
	}

	return 0;
}

Signed-off-by: Miklos Szeredi
Cc: stable@vger.kernel.org
Signed-off-by: Al Viro
---
 mm/shmem.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 0e5fb225007c..469f90d56051 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2367,8 +2367,10 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
 
 	if (new_dentry->d_inode) {
 		(void) shmem_unlink(new_dir, new_dentry);
-		if (they_are_dirs)
+		if (they_are_dirs) {
+			drop_nlink(new_dentry->d_inode);
 			drop_nlink(old_dir);
+		}
 	} else if (they_are_dirs) {
 		drop_nlink(old_dir);
 		inc_nlink(new_dir);
--
cgit v1.2.3-70-g09d2
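Directory link counts explain the arithmetic: an empty directory has
nlink 2 (its own "." plus the entry in its parent), and each
subdirectory's ".." adds one to the parent. Overwriting test-dir2 must
take its inode from 2 links to 0 -- shmem_unlink() drops one, and the
drop_nlink(new_dentry->d_inode) added above drops the other, while
drop_nlink(old_dir) accounts for the parent losing a subdirectory. A
toy model of this bookkeeping (illustrative types and names only, not
kernel code):

#include <stdio.h>

struct dir { const char *name; int nlink; };

static void mkdir_model(struct dir *parent, struct dir *d)
{
	d->nlink = 2;		/* "." plus the entry in parent */
	parent->nlink++;	/* d's ".." points at parent */
}

/* rename(src, victim) where victim is an empty directory and both
 * live in `parent`, as in the test program above */
static void rename_overwrite_model(struct dir *parent, struct dir *victim)
{
	victim->nlink -= 2;	/* loses its parent entry and its "." */
	parent->nlink--;	/* two subdirs' ".." refs become one */
}

int main(void)
{
	struct dir root = { "root", 2 }, d1 = { "d1", 0 }, d2 = { "d2", 0 };

	mkdir_model(&root, &d1);
	mkdir_model(&root, &d2);
	printf("before: root=%d d2=%d\n", root.nlink, d2.nlink);	/* 4, 2 */

	rename_overwrite_model(&root, &d2);
	printf("after:  root=%d d2=%d\n", root.nlink, d2.nlink);	/* 3, 0 */
	return 0;
}

Without the fix, the model's victim would be left at nlink 1, which is
exactly the nonzero st_nlink the test program detects.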
From 2c80929c4c4d54e568b07ab85877d5fd38f4b02f Mon Sep 17 00:00:00 2001
From: Miklos Szeredi
Date: Wed, 24 Sep 2014 17:09:11 +0200
Subject: fuse: honour max_read and max_write in direct_io mode

The third argument of fuse_get_user_pages(), "nbytesp", refers to the
number of bytes the caller asked to pack into the fuse request. This
value may be less than the capacity of the fuse request or of the
iov_iter, so fuse_get_user_pages() must ensure that *nbytesp does not
grow.

Now that the helper iov_iter_get_pages() performs all the hard work of
extracting pages from the iov_iter, this can be done by passing a
properly calculated "maxsize" to the helper.

The other caller of iov_iter_get_pages(), dio_refill_pages(), doesn't
need this capability, so it passes LONG_MAX as the maxsize argument.

Fixes: c9c37e2e6378 ("fuse: switch to iov_iter_get_pages()")
Reported-by: Werner Baumann
Tested-by: Maxim Patlasov
Signed-off-by: Miklos Szeredi
Signed-off-by: Al Viro
---
 fs/direct-io.c      |  2 +-
 fs/fuse/file.c      |  1 +
 include/linux/uio.h |  2 +-
 mm/iov_iter.c       | 14 +++++++++-----
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index c3116404ab49..e181b6b2e297 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -158,7 +158,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 {
 	ssize_t ret;
 
-	ret = iov_iter_get_pages(sdio->iter, dio->pages, DIO_PAGES,
+	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
 				&sdio->from);
 
 	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 912061ac4baf..caa8d95b24e8 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1305,6 +1305,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 		size_t start;
 		ssize_t ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
+					*nbytesp - nbytes,
 					req->max_pages - req->num_pages,
 					&start);
 		if (ret < 0)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 48d64e6ab292..290fbf0b6b8a 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -84,7 +84,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
 ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
-			unsigned maxpages, size_t *start);
+			size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
 			size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index ab88dc0ea1d3..9a09f2034fcc 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -310,7 +310,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 EXPORT_SYMBOL(iov_iter_init);
 
 static ssize_t get_pages_iovec(struct iov_iter *i,
-		   struct page **pages, unsigned maxpages,
+		   struct page **pages, size_t maxsize, unsigned maxpages,
 		   size_t *start)
 {
 	size_t offset = i->iov_offset;
@@ -323,6 +323,8 @@ static ssize_t get_pages_iovec(struct iov_iter *i,
 	len = iov->iov_len - offset;
 	if (len > i->count)
 		len = i->count;
+	if (len > maxsize)
+		len = maxsize;
 	addr = (unsigned long)iov->iov_base + offset;
 	len += *start = addr & (PAGE_SIZE - 1);
 	if (len > maxpages * PAGE_SIZE)
@@ -588,13 +590,15 @@ static unsigned long alignment_bvec(const struct iov_iter *i)
 }
 
 static ssize_t get_pages_bvec(struct iov_iter *i,
-		   struct page **pages, unsigned maxpages,
+		   struct page **pages, size_t maxsize, unsigned maxpages,
 		   size_t *start)
 {
 	const struct bio_vec *bvec = i->bvec;
 	size_t len = bvec->bv_len - i->iov_offset;
 	if (len > i->count)
 		len = i->count;
+	if (len > maxsize)
+		len = maxsize;
 	/* can't be more than PAGE_SIZE */
 	*start = bvec->bv_offset + i->iov_offset;
@@ -711,13 +715,13 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 EXPORT_SYMBOL(iov_iter_alignment);
 
 ssize_t iov_iter_get_pages(struct iov_iter *i,
-		   struct page **pages, unsigned maxpages,
+		   struct page **pages, size_t maxsize, unsigned maxpages,
 		   size_t *start)
 {
 	if (i->type & ITER_BVEC)
-		return get_pages_bvec(i, pages, maxpages, start);
+		return get_pages_bvec(i, pages, maxsize, maxpages, start);
 	else
-		return get_pages_iovec(i, pages, maxpages, start);
+		return get_pages_iovec(i, pages, maxsize, maxpages, start);
 }
 EXPORT_SYMBOL(iov_iter_get_pages);
--
cgit v1.2.3-70-g09d2
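The clamping this patch adds to get_pages_iovec()/get_pages_bvec() is
one more reduction in a chain of min()-style limits. A standalone
sketch of that chain (the function clamp_len and its constants are
ours, not the kernel's; the real code also writes the sub-page offset
back through *start):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* A segment's usable length is limited by the segment itself, by the
 * iterator's remaining count, by the caller-supplied maxsize (the new
 * limit this commit introduces), and finally by the capacity of the
 * page array, measured from the first page the data touches. */
static size_t clamp_len(size_t seg_len, size_t count, size_t maxsize,
			unsigned maxpages, unsigned long addr)
{
	size_t len = seg_len;
	size_t start = addr & (PAGE_SIZE - 1);	/* offset in first page */

	if (len > count)
		len = count;		/* don't read past the iterator */
	if (len > maxsize)
		len = maxsize;		/* the fix: honour the caller's cap */
	len += start;			/* include the sub-page lead-in */
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	return len - start;
}

int main(void)
{
	/* e.g. fuse caps the request at 32 KiB although the iovec
	 * segment holds a full 1 MiB */
	printf("%zu\n",
	       clamp_len(1UL << 20, 1UL << 20, 32768, 256, 0x1234));
	return 0;
}

Passing LONG_MAX for maxsize, as dio_refill_pages() does, makes the new
test a no-op, which is why direct-io needed only the mechanical
signature change.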